LLVM 23.0.0git
SpillUtils.cpp
Go to the documentation of this file.
1//===- SpillUtils.cpp - Utilities for checking for spills ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
10#include "CoroInternal.h"
11#include "llvm/Analysis/CFG.h"
13#include "llvm/IR/CFG.h"
14#include "llvm/IR/DebugInfo.h"
15#include "llvm/IR/Dominators.h"
18
19using namespace llvm;
20using namespace llvm::coro;
21
23
25 // Structural coroutine intrinsics that should not be spilled into the
26 // coroutine frame.
28}
29
30/// Does control flow starting at the given block ever reach a suspend
31/// instruction before reaching a block in VisitedOrFreeBBs?
33 VisitedBlocksSet &VisitedOrFreeBBs) {
34 // Eagerly try to add this block to the visited set. If it's already
35 // there, stop recursing; this path doesn't reach a suspend before
36 // either looping or reaching a freeing block.
37 if (!VisitedOrFreeBBs.insert(From).second)
38 return false;
39
40 // We assume that we'll already have split suspends into their own blocks.
41 if (coro::isSuspendBlock(From))
42 return true;
43
44 // Recurse on the successors.
45 for (auto *Succ : successors(From)) {
46 if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
47 return true;
48 }
49
50 return false;
51}
52
53/// Is the given alloca "local", i.e. bounded in lifetime to not cross a
54/// suspend point?
56 // Seed the visited set with all the basic blocks containing a free
57 // so that we won't pass them up.
58 VisitedBlocksSet VisitedOrFreeBBs;
59 for (auto *User : AI->users()) {
61 VisitedOrFreeBBs.insert(FI->getParent());
62 }
63
64 return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
65}
66
67/// Turn the given coro.alloca.alloc call into a dynamic allocation.
68/// This happens during the all-instructions iteration, so it must not
69/// delete the call.
70static Instruction *
73 IRBuilder<> Builder(AI);
74 auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);
75
76 for (User *U : AI->users()) {
78 U->replaceAllUsesWith(Alloc);
79 } else {
80 auto FI = cast<CoroAllocaFreeInst>(U);
81 Builder.SetInsertPoint(FI);
82 Shape.emitDealloc(Builder, Alloc, nullptr);
83 }
84 DeadInsts.push_back(cast<Instruction>(U));
85 }
86
87 // Push this on last so that it gets deleted after all the others.
88 DeadInsts.push_back(AI);
89
90 // Return the new allocation value so that we can check for needed spills.
92}
93
94// We need to make room to insert a spill after initial PHIs, but before
95// catchswitch instruction. Placing it before violates the requirement that
96// catchswitch, like all other EHPads must be the first nonPHI in a block.
97//
98// Split away catchswitch into a separate block and insert in its place:
99//
100// cleanuppad <InsertPt> cleanupret.
101//
102// cleanupret instruction will act as an insert point for the spill.
104 BasicBlock *CurrentBlock = CatchSwitch->getParent();
105 BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
106 CurrentBlock->getTerminator()->eraseFromParent();
107
108 auto *CleanupPad =
109 CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
110 auto *CleanupRet =
111 CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
112 return CleanupRet;
113}
114
115// We use a pointer use visitor to track how an alloca is being used.
116// The goal is to be able to answer the following three questions:
117// 1. Should this alloca be allocated on the frame instead.
118// 2. Could the content of the alloca be modified prior to CoroBegin, which
119// would require copying the data from the alloca to the frame after
120// CoroBegin.
121// 3. Are there any aliases created for this alloca prior to CoroBegin, but
122// used after CoroBegin. In that case, we will need to recreate the alias
123// after CoroBegin based off the frame.
124//
125// To answer question 1, we track two things:
126// A. List of all BasicBlocks that use this alloca or any of the aliases of
127// the alloca. In the end, we check if there exists any two basic blocks that
128// cross suspension points. If so, this alloca must be put on the frame.
129// B. Whether the alloca or any alias of the alloca is escaped at some point,
130// either by storing the address somewhere, or the address is used in a
131// function call that might capture. If it's ever escaped, this alloca must be
132// put on the frame conservatively.
133//
// To answer question 2, we track through the variable MayWriteBeforeCoroBegin.
135// Whenever a potential write happens, either through a store instruction, a
136// function call or any of the memory intrinsics, we check whether this
137// instruction is prior to CoroBegin.
138//
139// To answer question 3, we track the offsets of all aliases created for the
140// alloca prior to CoroBegin but used after CoroBegin. std::optional is used to
141// be able to represent the case when the offset is unknown (e.g. when you have
142// a PHINode that takes in different offset values). We cannot handle unknown
143// offsets and will assert. This is the potential issue left out. An ideal
144// solution would likely require a significant redesign.
145
146namespace {
147struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
148 using Base = PtrUseVisitor<AllocaUseVisitor>;
149 AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
150 const coro::Shape &CoroShape,
151 const SuspendCrossingInfo &Checker,
152 bool ShouldUseLifetimeStartInfo)
153 : PtrUseVisitor(DL), DT(DT), CoroShape(CoroShape), Checker(Checker),
154 ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {
155 for (AnyCoroSuspendInst *SuspendInst : CoroShape.CoroSuspends)
156 CoroSuspendBBs.insert(SuspendInst->getParent());
157 }
158
159 void visit(Instruction &I) {
160 Users.insert(&I);
161 Base::visit(I);
162 // If the pointer is escaped prior to CoroBegin, we have to assume it would
163 // be written into before CoroBegin as well.
164 if (PI.isEscaped() &&
165 !DT.dominates(CoroShape.CoroBegin, PI.getEscapingInst())) {
166 MayWriteBeforeCoroBegin = true;
167 }
168 }
169 // We need to provide this overload as PtrUseVisitor uses a pointer based
170 // visiting function.
171 void visit(Instruction *I) { return visit(*I); }
172
173 void visitPHINode(PHINode &I) {
174 enqueueUsers(I);
175 handleAlias(I);
176 }
177
178 void visitSelectInst(SelectInst &I) {
179 enqueueUsers(I);
180 handleAlias(I);
181 }
182
183 void visitCatchPadInst(CatchPadInst &I) {
184 // Windows EH requires exception objects allocated on the stack,
185 // shortcut the traversal and keep it on stack.
186 ShouldLiveOnFrame = false;
187 Base::Worklist.clear();
188 }
189
190 void visitInsertElementInst(InsertElementInst &I) {
191 enqueueUsers(I);
192 handleAlias(I);
193 }
194
195 void visitInsertValueInst(InsertValueInst &I) {
196 enqueueUsers(I);
197 handleAlias(I);
198 }
199
200 void visitStoreInst(StoreInst &SI) {
201 // Regardless whether the alias of the alloca is the value operand or the
202 // pointer operand, we need to assume the alloca is been written.
203 handleMayWrite(SI);
204
205 if (SI.getValueOperand() != U->get())
206 return;
207
208 // We are storing the pointer into a memory location, potentially escaping.
209 // As an optimization, we try to detect simple cases where it doesn't
210 // actually escape, for example:
211 // %ptr = alloca ..
212 // %addr = alloca ..
213 // store %ptr, %addr
214 // %x = load %addr
215 // ..
216 // If %addr is only used by loading from it, we could simply treat %x as
217 // another alias of %ptr, and not considering %ptr being escaped.
218 auto IsSimpleStoreThenLoad = [&]() {
219 auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
220 // If the memory location we are storing to is not an alloca, it
221 // could be an alias of some other memory locations, which is difficult
222 // to analyze.
223 if (!AI)
224 return false;
225 // StoreAliases contains aliases of the memory location stored into.
226 SmallVector<Instruction *, 4> StoreAliases = {AI};
227 while (!StoreAliases.empty()) {
228 Instruction *I = StoreAliases.pop_back_val();
229 for (User *U : I->users()) {
230 // If we are loading from the memory location, we are creating an
231 // alias of the original pointer.
232 if (auto *LI = dyn_cast<LoadInst>(U)) {
233 enqueueUsers(*LI);
234 handleAlias(*LI);
235 continue;
236 }
237 // If we are overriding the memory location, the pointer certainly
238 // won't escape.
239 if (auto *S = dyn_cast<StoreInst>(U))
240 if (S->getPointerOperand() == I)
241 continue;
243 continue;
244 // BitCastInst creats aliases of the memory location being stored
245 // into.
246 if (auto *BI = dyn_cast<BitCastInst>(U)) {
247 StoreAliases.push_back(BI);
248 continue;
249 }
250 return false;
251 }
252 }
253
254 return true;
255 };
256
257 if (!IsSimpleStoreThenLoad())
258 PI.setEscaped(&SI);
259 }
260
261 // All mem intrinsics modify the data.
262 void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }
263
264 void visitBitCastInst(BitCastInst &BC) {
266 handleAlias(BC);
267 }
268
269 void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
271 handleAlias(ASC);
272 }
273
274 void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
275 // The base visitor will adjust Offset accordingly.
277 handleAlias(GEPI);
278 }
279
280 void visitIntrinsicInst(IntrinsicInst &II) {
281 switch (II.getIntrinsicID()) {
282 default:
284 case Intrinsic::lifetime_start:
285 LifetimeStarts.insert(&II);
286 LifetimeStartBBs.push_back(II.getParent());
287 break;
288 case Intrinsic::lifetime_end:
289 LifetimeEndBBs.insert(II.getParent());
290 break;
291 }
292 }
293
294 void visitCallBase(CallBase &CB) {
295 for (unsigned Op = 0, OpCount = CB.arg_size(); Op < OpCount; ++Op)
296 if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
297 PI.setEscaped(&CB);
298 handleMayWrite(CB);
299 }
300
301 bool getShouldLiveOnFrame() const {
302 if (!ShouldLiveOnFrame)
303 ShouldLiveOnFrame = computeShouldLiveOnFrame();
304 return *ShouldLiveOnFrame;
305 }
306
307 bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }
308
309 DenseMap<Instruction *, std::optional<APInt>> getAliasesCopy() const {
310 assert(getShouldLiveOnFrame() && "This method should only be called if the "
311 "alloca needs to live on the frame.");
312 for (const auto &P : AliasOffetMap)
313 if (!P.second)
314 report_fatal_error("Unable to handle an alias with unknown offset "
315 "created before CoroBegin.");
316 return AliasOffetMap;
317 }
318
319private:
320 const DominatorTree &DT;
321 const coro::Shape &CoroShape;
322 const SuspendCrossingInfo &Checker;
323 // All alias to the original AllocaInst, created before CoroBegin and used
324 // after CoroBegin. Each entry contains the instruction and the offset in the
325 // original Alloca. They need to be recreated after CoroBegin off the frame.
326 DenseMap<Instruction *, std::optional<APInt>> AliasOffetMap{};
327 SmallPtrSet<Instruction *, 4> Users{};
328 SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
329 SmallVector<BasicBlock *> LifetimeStartBBs{};
330 SmallPtrSet<BasicBlock *, 2> LifetimeEndBBs{};
331 SmallPtrSet<const BasicBlock *, 2> CoroSuspendBBs{};
332 bool MayWriteBeforeCoroBegin{false};
333 bool ShouldUseLifetimeStartInfo{true};
334
335 mutable std::optional<bool> ShouldLiveOnFrame{};
336
337 bool computeShouldLiveOnFrame() const {
338 // If lifetime information is available, we check it first since it's
339 // more precise. We look at every pair of lifetime.start intrinsic and
340 // every basic block that uses the pointer to see if they cross suspension
341 // points. The uses cover both direct uses as well as indirect uses.
342 if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
343 // If there is no explicit lifetime.end, then assume the address can
344 // cross suspension points.
345 if (LifetimeEndBBs.empty())
346 return true;
347
348 // If there is a path from a lifetime.start to a suspend without a
349 // corresponding lifetime.end, then the alloca's lifetime persists
350 // beyond that suspension point and the alloca must go on the frame.
351 llvm::SmallVector<BasicBlock *> Worklist(LifetimeStartBBs);
352 if (isManyPotentiallyReachableFromMany(Worklist, CoroSuspendBBs,
353 &LifetimeEndBBs, &DT))
354 return true;
355
356 // Addresses are guaranteed to be identical after every lifetime.start so
357 // we cannot use the local stack if the address escaped and there is a
358 // suspend point between lifetime markers. This should also cover the
359 // case of a single lifetime.start intrinsic in a loop with suspend point.
360 if (PI.isEscaped()) {
361 for (auto *A : LifetimeStarts) {
362 for (auto *B : LifetimeStarts) {
363 if (Checker.hasPathOrLoopCrossingSuspendPoint(A->getParent(),
364 B->getParent()))
365 return true;
366 }
367 }
368 }
369 return false;
370 }
371 // FIXME: Ideally the isEscaped check should come at the beginning.
372 // However there are a few loose ends that need to be fixed first before
373 // we can do that. We need to make sure we are not over-conservative, so
374 // that the data accessed in-between await_suspend and symmetric transfer
375 // is always put on the stack, and also data accessed after coro.end is
376 // always put on the stack (esp the return object). To fix that, we need
377 // to:
378 // 1) Potentially treat sret as nocapture in calls
379 // 2) Special handle the return object and put it on the stack
380 // 3) Utilize lifetime.end intrinsic
381 if (PI.isEscaped())
382 return true;
383
384 for (auto *U1 : Users)
385 for (auto *U2 : Users)
386 if (Checker.isDefinitionAcrossSuspend(*U1, U2))
387 return true;
388
389 return false;
390 }
391
392 void handleMayWrite(const Instruction &I) {
393 if (!DT.dominates(CoroShape.CoroBegin, &I))
394 MayWriteBeforeCoroBegin = true;
395 }
396
397 bool usedAfterCoroBegin(Instruction &I) {
398 for (auto &U : I.uses())
399 if (DT.dominates(CoroShape.CoroBegin, U))
400 return true;
401 return false;
402 }
403
404 void handleAlias(Instruction &I) {
405 // We track all aliases created prior to CoroBegin but used after.
406 // These aliases may need to be recreated after CoroBegin if the alloca
407 // need to live on the frame.
408 if (DT.dominates(CoroShape.CoroBegin, &I) || !usedAfterCoroBegin(I))
409 return;
410
411 if (!IsOffsetKnown) {
412 AliasOffetMap[&I].reset();
413 } else {
414 auto [Itr, Inserted] = AliasOffetMap.try_emplace(&I, Offset);
415 if (!Inserted && Itr->second && *Itr->second != Offset) {
416 // If we have seen two different possible values for this alias, we set
417 // it to empty.
418 Itr->second.reset();
419 }
420 }
421 }
422};
423} // namespace
424
426 const SuspendCrossingInfo &Checker,
428 const DominatorTree &DT) {
429 if (Shape.CoroSuspends.empty())
430 return;
431
432 // The PromiseAlloca will be specially handled since it needs to be in a
433 // fixed position in the frame.
435 return;
436
437 // The __coro_gro alloca should outlive the promise, make sure we
438 // keep it outside the frame.
439 if (AI->hasMetadata(LLVMContext::MD_coro_outside_frame))
440 return;
441
442 // The code that uses lifetime.start intrinsic does not work for functions
443 // with loops without exit. Disable it on ABIs we know to generate such
444 // code.
445 bool ShouldUseLifetimeStartInfo =
448 AllocaUseVisitor Visitor{AI->getDataLayout(), DT, Shape, Checker,
449 ShouldUseLifetimeStartInfo};
450 Visitor.visitPtr(*AI);
451 if (!Visitor.getShouldLiveOnFrame())
452 return;
453 Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
454 Visitor.getMayWriteBeforeCoroBegin());
455}
456
458 const SuspendCrossingInfo &Checker) {
459 // Collect the spills for arguments and other not-materializable values.
460 for (Argument &A : F.args())
461 for (User *U : A.users())
462 if (Checker.isDefinitionAcrossSuspend(A, U))
463 Spills[&A].push_back(cast<Instruction>(U));
464}
465
467 SpillInfo &Spills, SmallVector<AllocaInfo, 8> &Allocas,
468 SmallVector<Instruction *, 4> &DeadInstructions,
470 const SuspendCrossingInfo &Checker, const DominatorTree &DT,
471 const coro::Shape &Shape) {
472
473 for (Instruction &I : instructions(F)) {
474 // Values returned from coroutine structure intrinsics should not be part
475 // of the Coroutine Frame.
477 continue;
478
479 // Handle alloca.alloc specially here.
480 if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
481 // Check whether the alloca's lifetime is bounded by suspend points.
482 if (isLocalAlloca(AI)) {
483 LocalAllocas.push_back(AI);
484 continue;
485 }
486
487 // If not, do a quick rewrite of the alloca and then add spills of
488 // the rewritten value. The rewrite doesn't invalidate anything in
489 // Spills because the other alloca intrinsics have no other operands
490 // besides AI, and it doesn't invalidate the iteration because we delay
491 // erasing AI.
492 auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);
493
494 for (User *U : Alloc->users()) {
495 if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
496 Spills[Alloc].push_back(cast<Instruction>(U));
497 }
498 continue;
499 }
500
501 // Ignore alloca.get; we process this as part of coro.alloca.alloc.
503 continue;
504
505 if (auto *AI = dyn_cast<AllocaInst>(&I)) {
506 collectFrameAlloca(AI, Shape, Checker, Allocas, DT);
507 continue;
508 }
509
510 for (User *U : I.users())
511 if (Checker.isDefinitionAcrossSuspend(I, U)) {
512 // We cannot spill a token.
513 if (I.getType()->isTokenTy())
515 "token definition is separated from the use by a suspend point");
516 Spills[&I].push_back(cast<Instruction>(U));
517 }
518 }
519}
520
522 const SuspendCrossingInfo &Checker) {
523 // We don't want the layout of coroutine frame to be affected
524 // by debug information. So we only choose to salvage dbg.values for
525 // whose value is already in the frame.
526 // We would handle the dbg.values for allocas specially
527 for (auto &Iter : Spills) {
528 auto *V = Iter.first;
530 findDbgValues(V, DVRs);
531 // Add the instructions which carry debug info that is in the frame.
532 for (DbgVariableRecord *DVR : DVRs)
533 if (Checker.isDefinitionAcrossSuspend(*V, DVR->Marker->MarkedInstr))
534 Spills[V].push_back(DVR->Marker->MarkedInstr);
535 }
536}
537
538/// Async and Retcon{Once} conventions assume that all spill uses can be sunk
539/// after the coro.begin intrinsic.
541 const DominatorTree &Dom, CoroBeginInst *CoroBegin, coro::SpillInfo &Spills,
545
546 // Collect all users that precede coro.begin.
547 auto collectUsers = [&](Value *Def) {
548 for (User *U : Def->users()) {
549 auto Inst = cast<Instruction>(U);
550 if (Inst->getParent() != CoroBegin->getParent() ||
551 Dom.dominates(CoroBegin, Inst))
552 continue;
553 if (ToMove.insert(Inst))
554 Worklist.push_back(Inst);
555 }
556 };
557 for (auto &I : Spills)
558 collectUsers(I.first);
559 for (auto &I : Allocas)
560 collectUsers(I.Alloca);
561
562 // Recursively collect users before coro.begin.
563 while (!Worklist.empty()) {
564 auto *Def = Worklist.pop_back_val();
565 for (User *U : Def->users()) {
566 auto Inst = cast<Instruction>(U);
567 if (Dom.dominates(CoroBegin, Inst))
568 continue;
569 if (ToMove.insert(Inst))
570 Worklist.push_back(Inst);
571 }
572 }
573
574 // Sort by dominance.
575 SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
576 llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
577 // If a dominates b it should precede (<) b.
578 return Dom.dominates(A, B);
579 });
580
581 Instruction *InsertPt = CoroBegin->getNextNode();
582 for (Instruction *Inst : InsertionList)
583 Inst->moveBefore(InsertPt->getIterator());
584}
585
587 Value *Def,
588 const DominatorTree &DT) {
589 BasicBlock::iterator InsertPt;
590 if (auto *Arg = dyn_cast<Argument>(Def)) {
591 // For arguments, we will place the store instruction right after
592 // the coroutine frame pointer instruction, i.e. coro.begin.
593 InsertPt = Shape.getInsertPtAfterFramePtr();
594
595 // If we're spilling an Argument, make sure we clear 'captures'
596 // from the coroutine function.
597 Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::Captures);
598 } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
599 // Don't spill immediately after a suspend; splitting assumes
600 // that the suspend will be followed by a branch.
601 InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHIIt();
602 } else {
603 auto *I = cast<Instruction>(Def);
604 if (!DT.dominates(Shape.CoroBegin, I)) {
605 // If it is not dominated by CoroBegin, then spill should be
606 // inserted immediately after CoroFrame is computed.
607 InsertPt = Shape.getInsertPtAfterFramePtr();
608 } else if (auto *II = dyn_cast<InvokeInst>(I)) {
609 // If we are spilling the result of the invoke instruction, split
610 // the normal edge and insert the spill in the new block.
611 auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
612 InsertPt = NewBB->getTerminator()->getIterator();
613 } else if (isa<PHINode>(I)) {
614 // Skip the PHINodes and EH pads instructions.
615 BasicBlock *DefBlock = I->getParent();
616 if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
617 InsertPt = splitBeforeCatchSwitch(CSI)->getIterator();
618 else
619 InsertPt = DefBlock->getFirstInsertionPt();
620 } else {
621 assert(!I->isTerminator() && "unexpected terminator");
622 // For all other values, the spill is placed immediately after
623 // the definition.
624 InsertPt = I->getNextNode()->getIterator();
625 }
626 }
627
628 return InsertPt;
629}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
IRTranslator LLVM IR MI
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
uint64_t IntrinsicInst * II
#define P(N)
This file provides a collection of visitors which walk the (instruction) uses of a pointer.
static void visit(BasicBlock &Start, std::function< bool(BasicBlock *)> op)
static bool isSuspendReachableFrom(BasicBlock *From, VisitedBlocksSet &VisitedOrFreeBBs)
Does control flow starting at the given block ever reach a suspend instruction before reaching a bloc...
static Instruction * splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch)
static bool isLocalAlloca(CoroAllocaAllocInst *AI)
Is the given alloca "local", i.e.
static bool isNonSpilledIntrinsic(Instruction &I)
static Instruction * lowerNonLocalAlloca(CoroAllocaAllocInst *AI, const Shape &Shape, SmallVectorImpl< Instruction * > &DeadInsts)
Turn the given coro.alloca.alloc call into a dynamic allocation.
SmallPtrSet< BasicBlock *, 8 > VisitedBlocksSet
static void collectFrameAlloca(AllocaInst *AI, const coro::Shape &Shape, const SuspendCrossingInfo &Checker, SmallVectorImpl< AllocaInfo > &Allocas, const DominatorTree &DT)
an instruction to allocate memory on the stack
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
LLVM Basic Block Representation.
Definition BasicBlock.h:62
LLVM_ABI const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
LLVM_ABI BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="")
Split the basic block into two basic blocks at the specified instruction.
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
bool doesNotCapture(unsigned OpNo) const
Determine whether this data operand is not captured.
Value * getArgOperand(unsigned i) const
unsigned arg_size() const
Value * getParentPad() const
static CleanupPadInst * Create(Value *ParentPad, ArrayRef< Value * > Args={}, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
This represents the llvm.coro.alloca.alloc instruction.
Definition CoroInstr.h:772
Value * getSize() const
Definition CoroInstr.h:776
This class represents the llvm.coro.begin or llvm.coro.begin.custom.abi instructions.
Definition CoroInstr.h:475
Record of a variable value-assignment, aka a non instruction representation of the dbg....
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2811
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
LLVM_ABI void moveBefore(InstListType::iterator InsertPos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
A base class for visitors over the uses of a pointer value.
void visitGetElementPtrInst(GetElementPtrInst &GEPI)
void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC)
void visitIntrinsicInst(IntrinsicInst &II)
iterator end()
Get an iterator to the end of the SetVector.
Definition SetVector.h:112
iterator begin()
Get an iterator to the beginning of the SetVector.
Definition SetVector.h:106
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const
LLVM Value Representation.
Definition Value.h:75
iterator_range< user_iterator > users()
Definition Value.h:427
SmallVector< UseToVisit, 8 > Worklist
The worklist of to-visit uses.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
SmallMapVector< Value *, SmallVector< Instruction *, 2 >, 8 > SpillInfo
Definition SpillUtils.h:18
@ Async
The "async continuation" lowering, where each suspend point creates a single continuation function.
Definition CoroShape.h:48
@ RetconOnce
The "unique returned-continuation" lowering, where each suspend point creates a single continuation f...
Definition CoroShape.h:43
@ Retcon
The "returned-continuation" lowering, where each suspend point creates a single continuation function...
Definition CoroShape.h:36
BasicBlock::iterator getSpillInsertionPt(const coro::Shape &, Value *Def, const DominatorTree &DT)
bool isSuspendBlock(BasicBlock *BB)
void sinkSpillUsesAfterCoroBegin(const DominatorTree &DT, CoroBeginInst *CoroBegin, coro::SpillInfo &Spills, SmallVectorImpl< coro::AllocaInfo > &Allocas)
Async and Retcon{Once} conventions assume that all spill uses can be sunk after the coro....
void collectSpillsFromArgs(SpillInfo &Spills, Function &F, const SuspendCrossingInfo &Checker)
void collectSpillsFromDbgInfo(SpillInfo &Spills, Function &F, const SuspendCrossingInfo &Checker)
void collectSpillsAndAllocasFromInsts(SpillInfo &Spills, SmallVector< AllocaInfo, 8 > &Allocas, SmallVector< Instruction *, 4 > &DeadInstructions, SmallVector< CoroAllocaAllocInst *, 4 > &LocalAllocas, Function &F, const SuspendCrossingInfo &Checker, const DominatorTree &DT, const coro::Shape &Shape)
friend class Instruction
Iterator for Instructions in a `BasicBlock.
Definition BasicBlock.h:73
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI bool isManyPotentiallyReachableFromMany(SmallVectorImpl< BasicBlock * > &Worklist, const SmallPtrSetImpl< const BasicBlock * > &StopSet, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr, const CycleInfo *CI=nullptr)
Determine whether there is a potentially a path from at least one block in 'Worklist' to at least one...
Definition CFG.cpp:302
@ Offset
Definition DWP.cpp:532
LLVM_ABI void findDbgValues(Value *V, SmallVectorImpl< DbgVariableRecord * > &DbgVariableRecords)
Finds the dbg.values describing a value.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
auto successors(const MachineBasicBlock *BB)
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1636
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
DWARFExpression::Operation Op
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI BasicBlock * SplitEdge(BasicBlock *From, BasicBlock *To, DominatorTree *DT=nullptr, LoopInfo *LI=nullptr, MemorySSAUpdater *MSSAU=nullptr, const Twine &BBName="")
Split the edge connecting the specified blocks, and return the newly created basic block between From...
coro::ABI ABI
Definition CoroShape.h:98
SmallVector< AnyCoroSuspendInst *, 4 > CoroSuspends
Definition CoroShape.h:59
LLVM_ABI Value * emitAlloc(IRBuilder<> &Builder, Value *Size, CallGraph *CG) const
Allocate memory according to the rules of the active lowering.
SwitchLoweringStorage SwitchLowering
Definition CoroShape.h:140
CoroBeginInst * CoroBegin
Definition CoroShape.h:54
BasicBlock::iterator getInsertPtAfterFramePtr() const
Definition CoroShape.h:232
LLVM_ABI void emitDealloc(IRBuilder<> &Builder, Value *Ptr, CallGraph *CG) const
Deallocate memory according to the rules of the active lowering.