LLVM: lib/Transforms/Coroutines/CoroFrame.cpp Source File
//===- CoroFrame.cpp - Builds and manipulates coroutine frame ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file contains classes used to discover if for a particular value
// its definition precedes and its uses follow a suspend block. This is
// referred to as a suspend crossing value.
//
// Using the information discovered we form a Coroutine Frame structure to
// contain those values. All uses of those values are replaced with appropriate
// GEP + load from the coroutine frame. At the point of the definition we spill
// the value into the coroutine frame.
//
//===----------------------------------------------------------------------===//
39#include
40#include
41
42using namespace llvm;
43
45
46#define DEBUG_TYPE "coro-frame"
47
48namespace {
49class FrameTypeBuilder;
50
struct FrameDataInfo {
  // All the values (that are not allocas) that needs to be spilled to the
  // frame.
  coro::SpillInfo &Spills;
  // Allocas contains all values defined as allocas that need to live in the
  // frame.
  SmallVectorImpl<coro::AllocaInfo> &Allocas;

  FrameDataInfo(coro::SpillInfo &Spills,
                SmallVectorImpl<coro::AllocaInfo> &Allocas)
      : Spills(Spills), Allocas(Allocas) {}

  SmallVector<Value *, 8> getAllDefs() const {
    SmallVector<Value *, 8> Defs;
    for (const auto &P : Spills)
      Defs.push_back(P.first);
    for (const auto &A : Allocas)
      Defs.push_back(A.Alloca);
    return Defs;
  }

  uint32_t getFieldIndex(Value *V) const {
    auto Itr = FieldIndexMap.find(V);
    assert(Itr != FieldIndexMap.end() &&
           "Value does not have a frame field index");
    return Itr->second;
  }

  void setFieldIndex(Value *V, uint32_t Index) {
    assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
           "Cannot set the index for the same field twice.");
    FieldIndexMap[V] = Index;
  }

  Align getAlign(Value *V) const {
    auto Iter = FieldAlignMap.find(V);
    assert(Iter != FieldAlignMap.end());
    return Iter->second;
  }

  void setAlign(Value *V, Align AL) {
    assert(FieldAlignMap.count(V) == 0);
    FieldAlignMap.insert({V, AL});
  }

  uint64_t getDynamicAlign(Value *V) const {
    auto Iter = FieldDynamicAlignMap.find(V);
    assert(Iter != FieldDynamicAlignMap.end());
    return Iter->second;
  }

  void setDynamicAlign(Value *V, uint64_t Align) {
    assert(FieldDynamicAlignMap.count(V) == 0);
    FieldDynamicAlignMap.insert({V, Align});
  }

  uint32_t getOffset(Value *V) const {
    auto Iter = FieldOffsetMap.find(V);
    assert(Iter != FieldOffsetMap.end());
    return Iter->second;
  }

  void setOffset(Value *V, uint32_t Offset) {
    assert(FieldOffsetMap.count(V) == 0);
    FieldOffsetMap.insert({V, Offset});
  }

  // Remap the index of every field in the frame, using the final layout index.
  void updateLayoutIndex(FrameTypeBuilder &B);

private:
  // LayoutIndexUpdateStarted is used to avoid updating the index of the same
  // field twice.
  bool LayoutIndexUpdateStarted = false;

  // Map from values to their slot indexes on the frame. They will be first
  // calculated out of order. The indexes will be updated later after the
  // frame is built.
  DenseMap<Value *, uint32_t> FieldIndexMap;
  // Map from values to their alignment on the frame. They would be set after
  // the frame is built.
  DenseMap<Value *, Align> FieldAlignMap;
  DenseMap<Value *, uint64_t> FieldDynamicAlignMap;
  // Map from values to their offset on the frame. They would be set after
  // the frame is built.
  DenseMap<Value *, uint32_t> FieldOffsetMap;
};
137}
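// Illustrative use of FrameDataInfo (a sketch; the identifiers below are
// hypothetical, not defined in this file):
//
//   FrameDataInfo FrameData(Spills, Allocas);
//   FieldIDType Id = Builder.addFieldForAlloca(AI);   // provisional id
//   FrameData.setFieldIndex(AI, Id);
//   ...
//   FrameData.updateLayoutIndex(Builder);             // after finish()
//   unsigned Idx = FrameData.getFieldIndex(AI);       // real struct element
//
// The indirection exists because fields are registered before the optimized
// struct layout runs, so the final element index, alignment and offset are
// only known once FrameTypeBuilder::finish() has been called.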
138
#ifndef NDEBUG
static void dumpSpills(StringRef Title, const coro::SpillInfo &Spills) {
  dbgs() << "------------- " << Title << " --------------\n";
  for (const auto &E : Spills) {
    E.first->dump();
    dbgs() << "   user: ";
    for (auto *I : E.second)
      I->dump();
  }
}

static void dumpAllocas(const SmallVectorImpl<coro::AllocaInfo> &Allocas) {
  dbgs() << "------------- Allocas --------------\n";
  for (const auto &A : Allocas) {
    A.Alloca->dump();
  }
}
#endif
157
158namespace {
159using FieldIDType = size_t;
160
161
162
163
class FrameTypeBuilder {
private:
  struct Field {
    uint64_t Size;
    uint64_t Offset;
    Type *Ty;
    FieldIDType LayoutFieldIndex;
    Align Alignment;
    Align TyAlignment;
    uint64_t DynamicAlignBuffer;
  };

  const DataLayout &DL;
  LLVMContext &Context;
  uint64_t StructSize = 0;
  Align StructAlign;
  bool IsFinished = false;

  std::optional<Align> MaxFrameAlignment;

  SmallVector<Field, 8> Fields;
  DenseMap<Value*, unsigned> FieldIndexByKey;

public:
  FrameTypeBuilder(LLVMContext &Context, const DataLayout &DL,
                   std::optional<Align> MaxFrameAlignment)
      : DL(DL), Context(Context), MaxFrameAlignment(MaxFrameAlignment) {}
191
192
193
  [[nodiscard]] FieldIDType addFieldForAlloca(AllocaInst *AI,
                                              bool IsHeader = false) {
    Type *Ty = AI->getAllocatedType();

    // Make an array type if this is a static array allocation.
    if (AI->isArrayAllocation()) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
        Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
      else
        report_fatal_error("Coroutines cannot handle non static allocas yet");
    }

    return addField(Ty, AI->getAlign(), IsHeader);
  }
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
  void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
                          coro::Shape &Shape, bool OptimizeFrame);
239
240 [[nodiscard]] FieldIDType addField(Type *Ty, MaybeAlign MaybeFieldAlignment,
241 bool IsHeader = false,
242 bool IsSpillOfValue = false) {
243 assert(!IsFinished && "adding fields to a finished builder");
244 assert(Ty && "must provide a type for a field");
245
246
247 uint64_t FieldSize = DL.getTypeAllocSize(Ty);
248
249
250
251 if (FieldSize == 0) {
252 return 0;
253 }
254
255
256
257
258
259 Align ABIAlign = DL.getABITypeAlign(Ty);
260 Align TyAlignment = ABIAlign;
261 if (IsSpillOfValue && MaxFrameAlignment && *MaxFrameAlignment < ABIAlign)
262 TyAlignment = *MaxFrameAlignment;
263 Align FieldAlignment = MaybeFieldAlignment.value_or(TyAlignment);
264
265
266
267
    uint64_t DynamicAlignBuffer = 0;
    if (MaxFrameAlignment && (FieldAlignment > *MaxFrameAlignment)) {
      DynamicAlignBuffer =
          offsetToAlignment(MaxFrameAlignment->value(), FieldAlignment);
      FieldAlignment = *MaxFrameAlignment;
      FieldSize = FieldSize + DynamicAlignBuffer;
    }

    // Lay out header fields immediately.
    uint64_t Offset;
    if (IsHeader) {
      Offset = alignTo(StructSize, FieldAlignment);
      StructSize = Offset + FieldSize;

    // Everything else has a flexible offset.
    } else {
      Offset = OptimizedStructLayoutField::FlexibleOffset;
    }

    Fields.push_back({FieldSize, Offset, Ty, 0, FieldAlignment, TyAlignment,
                      DynamicAlignBuffer});
    return Fields.size() - 1;
  }
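// A hedged example of the two placement modes above: with a 16-byte header
// already laid out (the two function pointers), addField(i64, Align(8),
// /*IsHeader=*/true) is placed immediately at offset 16 and StructSize grows
// to 24, whereas the same field with IsHeader=false is recorded with
// OptimizedStructLayoutField::FlexibleOffset and only receives its final
// offset from performOptimizedStructLayout() in finish().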
291
292
  StructType *finish(StringRef Name);
295 uint64_t getStructSize() const {
296 assert(IsFinished && "not yet finished!");
297 return StructSize;
298 }
299
300 Align getStructAlign() const {
301 assert(IsFinished && "not yet finished!");
302 return StructAlign;
303 }
304
305 FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
306 assert(IsFinished && "not yet finished!");
307 return Fields[Id].LayoutFieldIndex;
308 }
309
310 Field getLayoutField(FieldIDType Id) const {
311 assert(IsFinished && "not yet finished!");
312 return Fields[Id];
313 }
314};
315}
316
void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
  auto Updater = [&](Value *I) {
    auto Field = B.getLayoutField(getFieldIndex(I));
    setFieldIndex(I, Field.LayoutFieldIndex);
    setAlign(I, Field.Alignment);
    uint64_t dynamicAlign =
        Field.DynamicAlignBuffer
            ? Field.DynamicAlignBuffer + Field.Alignment.value()
            : 0;
    setDynamicAlign(I, dynamicAlign);
    setOffset(I, Field.Offset);
  };
  LayoutIndexUpdateStarted = true;
  for (auto &S : Spills)
    Updater(S.first);
  for (const auto &A : Allocas)
    Updater(A.Alloca);
  LayoutIndexUpdateStarted = false;
}
336
void FrameTypeBuilder::addFieldForAllocas(const Function &F,
                                          FrameDataInfo &FrameData,
                                          coro::Shape &Shape,
                                          bool OptimizeFrame) {
  using AllocaSetType = SmallVector<AllocaInst *, 4>;
  SmallVector<AllocaSetType, 4> NonOverlapedAllocas;

  // We need to add field for allocas at the end of this function.
  auto AddFieldForAllocasAtExit = make_scope_exit([&]() {
    for (auto AllocaList : NonOverlapedAllocas) {
      auto *LargestAI = *AllocaList.begin();
      FieldIDType Id = addFieldForAlloca(LargestAI);
      for (auto *Alloca : AllocaList)
        FrameData.setFieldIndex(Alloca, Id);
    }
  });

  if (!OptimizeFrame) {
    for (const auto &A : FrameData.Allocas) {
      AllocaInst *Alloca = A.Alloca;
      NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
    return;
  }
361
362
363
364
365
366
367
368
369
370
371
372
373
      if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
378 auto *SWI = const_cast<SwitchInst *>(ConstSWI);
379 DefaultSuspendDest[SWI] = SWI->getDefaultDest();
380 SWI->setDefaultDest(SWI->getSuccessor(1));
381 }
382 }
383 }
384
385 auto ExtractAllocas = [&]() {
386 AllocaSetType Allocas;
388 for (const auto &A : FrameData.Allocas)
389 Allocas.push_back(A.Alloca);
390 return Allocas;
391 };
392 StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
393 StackLifetime::LivenessType::May);
394 StackLifetimeAnalyzer.run();
395 auto DoAllocasInterfere = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
396 return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
397 StackLifetimeAnalyzer.getLiveRange(AI2));
398 };
  auto GetAllocaSize = [&](const coro::AllocaInfo &A) {
    std::optional<TypeSize> RetSize = A.Alloca->getAllocationSize(DL);
401 assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
402 assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
403 return RetSize->getFixedValue();
404 };
405
406
407
408
409 sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
410 return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
411 });
412 for (const auto &A : FrameData.Allocas) {
414 bool Merged = false;
415
416
417
418 for (auto &AllocaSet : NonOverlapedAllocas) {
419 assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n");
420 bool NoInterference = none_of(AllocaSet, [&](auto Iter) {
421 return DoAllocasInterfere(Alloca, Iter);
422 });
423
424
425
426
427
428
429 bool Alignable = [&]() -> bool {
430 auto *LargestAlloca = *AllocaSet.begin();
431 return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
432 0;
433 }();
434 bool CouldMerge = NoInterference && Alignable;
435 if (!CouldMerge)
436 continue;
437 AllocaSet.push_back(Alloca);
438 Merged = true;
439 break;
440 }
441 if (!Merged) {
442 NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
443 }
444 }
445
446
447 for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
448 SwitchInst *SWI = SwitchAndDefaultDest.first;
449 BasicBlock *DestBB = SwitchAndDefaultDest.second;
451 }
452
  LLVM_DEBUG(for (auto &AllocaSet
                  : NonOverlapedAllocas) {
455 if (AllocaSet.size() > 1) {
456 dbgs() << "In Function:" << F.getName() << "\n";
457 dbgs() << "Find Union Set "
458 << "\n";
459 dbgs() << "\tAllocas are \n";
460 for (auto Alloca : AllocaSet)
461 dbgs() << "\t\t" << *Alloca << "\n";
462 }
463 });
464}
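// A small, illustrative case of the merging above (IR names hypothetical):
//
//   %x = alloca i64, align 8   ; live only before the first co_await
//   %y = alloca i64, align 8   ; live only after the first co_await
//
// StackLifetime reports non-overlapping live ranges and the alignments are
// compatible, so both allocas land in one NonOverlapedAllocas set and share
// a single i64 frame field; the set's largest alloca supplies the field's
// type and alignment, and every member gets the same field index.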
465
StructType *FrameTypeBuilder::finish(StringRef Name) {
  assert(!IsFinished && "already finished!");
468
469
470
  SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
  LayoutFields.reserve(Fields.size());
  for (auto &Field : Fields) {
    LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
                              Field.Offset);
  }

  // Perform layout.
  auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
  StructSize = SizeAndAlign.first;
  StructAlign = SizeAndAlign.second;

  auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
    return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
  };
486
487
488
  bool Packed = [&] {
    for (auto &LayoutField : LayoutFields) {
491 auto &F = getField(LayoutField);
      if (!isAligned(F.TyAlignment, LayoutField.Offset))
493 return true;
494 }
495 return false;
496 }();
497
498
  SmallVector<Type *, 16> FieldTypes;
  FieldTypes.reserve(LayoutFields.size() * 3 / 2);
  uint64_t LastOffset = 0;
  for (auto &LayoutField : LayoutFields) {
503 auto &F = getField(LayoutField);
504
505 auto Offset = LayoutField.Offset;
506
507
508
509
511 if (Offset != LastOffset) {
      if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
        FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
                                            Offset - LastOffset));
515 }
516
    F.LayoutFieldIndex = FieldTypes.size();

    FieldTypes.push_back(F.Ty);
    if (F.DynamicAlignBuffer) {
      FieldTypes.push_back(
          ArrayType::get(Type::getInt8Ty(Context), F.DynamicAlignBuffer));
    }
525 LastOffset = Offset + F.Size;
526 }
527
  StructType *Ty = StructType::create(Context, FieldTypes, Name, Packed);
530#ifndef NDEBUG
531
532 auto Layout = DL.getStructLayout(Ty);
  for (auto &F : Fields) {
    assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
    assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
536 }
537#endif
538
539 IsFinished = true;
540
541 return Ty;
542}
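// Sketch of what finish() can produce (field order and sizes illustrative):
// whenever the optimized layout leaves a gap before a field, an [N x i8]
// padding member is materialized so element offsets match the computed
// layout, e.g.
//
//   %f.Frame = type { ptr, ptr, i32, [4 x i8], i64 }
//
// where the [4 x i8] keeps the i64 at its assigned 8-byte-aligned offset.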
543
static void cacheDIVar(FrameDataInfo &FrameData,
                       DenseMap<Value *, DILocalVariable *> &DIVarCache) {
  for (auto *V : FrameData.getAllDefs()) {
    if (DIVarCache.contains(V))
      continue;

    auto CacheIt = [&DIVarCache, V](const auto &Container) {
      auto *I = llvm::find_if(Container, [](auto *DDI) {
        return DDI->getExpression()->getNumElements() == 0;
      });
      if (I != Container.end())
        DIVarCache.insert({V, (*I)->getVariable()});
    };
    CacheIt(findDbgDeclares(V));
    CacheIt(findDVRDeclares(V));
  }
560}
561
562
563
566
    OS << "__int_" << cast<IntegerType>(Ty)->getBitWidth();
571 return MDName->getString();
572 }
573
576 return "__float_";
578 return "__double_";
579 return "__floating_type_";
580 }
581
583 return "PointerType";
584
    if (!cast<StructType>(Ty)->hasName())
587 return "__LiteralStructType_";
588
590
592 for (auto &Iter : Buffer)
593 if (Iter == '.' || Iter == ':')
594 Iter = '_';
596 return MDName->getString();
597 }
598
599 return "UnknownType";
600}
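// Examples of the names produced above (illustrative): i32 maps to
// "__int_32", float and double map to "__float_" and "__double_", any
// pointer maps to "PointerType", and a named struct such as
// "struct.std::coroutine_handle" becomes "struct_std__coroutine_handle"
// once '.' and ':' are rewritten to '_'. Unhandled types fall back to
// "UnknownType".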
601
604 unsigned LineNum,
607 return DT;
608
610
611 DIType *RetType = nullptr;
612
    auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
616 llvm::DINode::FlagArtificial);
619 dwarf::DW_ATE_float,
620 llvm::DINode::FlagArtificial);
622
623
624
625
626
627
628
629 RetType =
632 std::nullopt, Name);
637 llvm::DINode::FlagArtificial, nullptr, llvm::DINodeArray());
638
    auto *StructTy = cast<StructType>(Ty);
641 for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
642 DIType *DITy = solveDIType(Builder, StructTy->getElementType(I), Layout,
643 Scope, LineNum, DITypeCache);
646 Scope, DITy->getName(), Scope->getFile(), LineNum,
649 llvm::DINode::FlagArtificial, DITy));
650 }
651
653
654 RetType = DIStruct;
655 } else {
656 LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n");
659 Name, 8, dwarf::DW_ATE_unsigned_char, llvm::DINode::FlagArtificial);
660
661 if (Size <= 8)
662 RetType = CharSizeType;
663 else {
664 if (Size % 8 != 0)
666
670 }
671 }
672
673 DITypeCache.insert({Ty, RetType});
674 return RetType;
675}
676
677
678
679
680
681
682
683
684
685
686
687
688
690 FrameDataInfo &FrameData) {
692
693
694
695 if (!DIS || !DIS->getUnit() ||
698 DIS->getUnit()->getEmissionKind() != DICompileUnit::DebugEmissionKind::FullDebug)
699 return;
700
701 assert(Shape.ABI == coro::ABI::Switch &&
702 "We could only build debug infomation for C++ coroutine now.\n");
703
704 DIBuilder DBuilder(*F.getParent(), false);
705
707 "Coroutine with switch ABI should own Promise alloca");
708
710 unsigned LineNum = DIS->getLine();
711
713 DIS->getUnit(), Twine(F.getName() + ".coro_frame_ty").str(),
714 DFile, LineNum, Shape.FrameSize * 8,
715 Shape.FrameAlign.value() * 8, llvm::DINode::FlagArtificial, nullptr,
716 llvm::DINodeArray());
720
723
727
729 NameCache.insert({ResumeIndex, "__resume_fn"});
730 NameCache.insert({DestroyIndex, "__destroy_fn"});
731 NameCache.insert({IndexIndex, "__coro_index"});
732
734 *DestroyFnTy = FrameTy->getElementType(DestroyIndex),
736
744
745
746
748 "__coro_index",
750 ? 8
752 dwarf::DW_ATE_unsigned_char)});
753
754 for (auto *V : FrameData.getAllDefs()) {
756 continue;
757
758 auto Index = FrameData.getFieldIndex(V);
759
760 NameCache.insert({Index, DIVarCache[V]->getName()});
761 TyCache.insert({Index, DIVarCache[V]->getType()});
762 }
763
764
766
767 OffsetCache.insert({ResumeIndex, {8, 0}});
768 OffsetCache.insert({DestroyIndex, {8, 8}});
770 {IndexIndex,
772
773 for (auto *V : FrameData.getAllDefs()) {
774 auto Index = FrameData.getFieldIndex(V);
775
777 {Index, {FrameData.getAlign(V).value(), FrameData.getOffset(V)}});
778 }
779
781
782
783
784
785 unsigned UnknownTypeNum = 0;
786 for (unsigned Index = 0; Index < FrameTy->getNumElements(); Index++) {
787 if (!OffsetCache.contains(Index))
788 continue;
789
790 std::string Name;
794 DIType *DITy = nullptr;
795
797 assert(Ty->isSized() && "We can't handle type which is not sized.\n");
799 AlignInBits = OffsetCache[Index].first * 8;
800 OffsetInBits = OffsetCache[Index].second * 8;
801
802 if (auto It = NameCache.find(Index); It != NameCache.end()) {
803 Name = It->second.str();
804 DITy = TyCache[Index];
805 } else {
806 DITy = solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
807 assert(DITy && "SolveDIType shouldn't return nullptr.\n");
809 Name += "_" + std::to_string(UnknownTypeNum);
810 UnknownTypeNum++;
811 }
812
814 FrameDITy, Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
815 llvm::DINode::FlagArtificial, DITy));
816 }
817
819
820 auto *FrameDIVar =
822 FrameDITy, true, DINode::FlagArtificial);
823
824
825
826
827
828
829
830
831 auto RetainedNodes = DIS->getRetainedNodes();
833 RetainedNodes.end());
834 RetainedNodesVec.push_back(FrameDIVar);
836
837
838
840 DILocation::get(DIS->getContext(), LineNum, 1, DIS);
841 assert(FrameDIVar->isValidLocationForIntrinsic(DILoc));
842
847 DbgVariableRecord::LocationType::Declare);
849 It->getParent()->insertDbgRecordBefore(NewDVR, It);
850 } else {
851 DBuilder.insertDeclare(Shape.FramePtr, FrameDIVar,
854 }
855}
856
857
858
859
860
861
862
863
864
866 FrameDataInfo &FrameData,
867 bool OptimizeFrame) {
870
871
872 std::optional MaxFrameAlignment;
873 if (Shape.ABI == coro::ABI::Async)
875 FrameTypeBuilder B(C, DL, MaxFrameAlignment);
876
878 std::optional SwitchIndexFieldId;
879
880 if (Shape.ABI == coro::ABI::Switch) {
881 auto *FnPtrTy = PointerType::getUnqual(C);
882
883
884
885 (void)B.addField(FnPtrTy, std::nullopt, true);
886 (void)B.addField(FnPtrTy, std::nullopt, true);
887
888
889
890
891 if (PromiseAlloca)
892 FrameData.setFieldIndex(
893 PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, true));
894
895
896
899
900 SwitchIndexFieldId = B.addField(IndexType, std::nullopt);
901 } else {
902 assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
903 }
904
905
906
907 B.addFieldForAllocas(F, FrameData, Shape, OptimizeFrame);
908
909
910
911
912 if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
913
914
915 FrameData.Allocas.emplace_back(
917
918 for (auto &S : FrameData.Spills) {
919 Type *FieldType = S.first->getType();
920
921
    if (const Argument *A = dyn_cast<Argument>(S.first))
923 if (A->hasByValAttr())
924 FieldType = A->getParamByValType();
925 FieldIDType Id = B.addField(FieldType, std::nullopt, false ,
926 true );
927 FrameData.setFieldIndex(S.first, Id);
928 }
929
932 Name.append(".Frame");
934 }();
935
936 FrameData.updateLayoutIndex(B);
939
940 switch (Shape.ABI) {
941 case coro::ABI::Switch: {
942
943 auto IndexField = B.getLayoutField(*SwitchIndexFieldId);
947
948
949
951 break;
952 }
953
954
955 case coro::ABI::Retcon:
956 case coro::ABI::RetconOnce: {
959 = (B.getStructSize() <= Id->getStorageSize() &&
960 B.getStructAlign() <= Id->getStorageAlignment());
961 break;
962 }
963 case coro::ABI::Async: {
966
967
973 "The alignment requirment of frame variables cannot be higher than "
974 "the alignment of the async function context");
975 }
976 break;
977 }
978 }
979
980 return FrameTy;
981}
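// For the switch ABI the frame built above therefore starts with a fixed
// header (resume fn ptr, destroy fn ptr, and the promise alloca when one
// exists), while the suspend index, spilled values and remaining allocas are
// placed by the optimized struct layout. A purely illustrative result:
//
//   %f.Frame = type { ptr, ptr, %promise_ty, i64, i2 }
//
// (the index is just wide enough to number the suspend points). The
// retcon/retcon-once ABIs instead record whether the finished frame fits in
// the caller-provided storage (IsFrameInlineInStorage), and the async ABI
// asserts that the frame alignment never exceeds the async context
// alignment.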
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1012
1013
1014
1015
1016 auto GetFramePointer = [&](Value *Orig) -> Value * {
1017 FieldIDType Index = FrameData.getFieldIndex(Orig);
1021 };
1022
    if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
1025 auto Count = CI->getValue().getZExtValue();
1026 if (Count > 1) {
1028 }
1029 } else {
1030 report_fatal_error("Coroutines cannot handle non static allocas yet");
1031 }
1032 }
1033
    auto GEP = cast<GetElementPtrInst>(
        Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
    if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
1037 if (FrameData.getDynamicAlign(Orig) != 0) {
1040 auto *IntPtrTy = M->getDataLayout().getIntPtrType(AI->getType());
1042 auto *AlignMask =
1043 ConstantInt::get(IntPtrTy, AI->getAlign().value() - 1);
1044 PtrValue = Builder.CreateAdd(PtrValue, AlignMask);
1045 PtrValue = Builder.CreateAnd(PtrValue, Builder.CreateNot(AlignMask));
1047 }
1048
1049
1050
1051
1052
1053
1054
1055 if (GEP->getType() != Orig->getType())
1057 Orig->getName() + Twine(".cast"));
1058 }
1059 return GEP;
1060 };
1061
1062 for (auto const &E : FrameData.Spills) {
1063 Value *Def = E.first;
1064 auto SpillAlignment = Align(FrameData.getAlign(Def));
1065
1066
1068
1069 Type *ByValTy = nullptr;
    if (auto *Arg = dyn_cast<Argument>(Def)) {
1071
1072
1073 Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);
1074
1075 if (Arg->hasByValAttr())
1076 ByValTy = Arg->getParamByValType();
1077 }
1078
1079 auto Index = FrameData.getFieldIndex(Def);
1080 Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
1082 FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
1083 if (ByValTy) {
1084
1085
1088 } else {
1090 }
1091
1093 Value *CurrentReload = nullptr;
1094 for (auto *U : E.second) {
1095
1096
1097
1098 if (CurrentBlock != U->getParent()) {
1099 CurrentBlock = U->getParent();
1102
1103 auto *GEP = GetFramePointer(E.first);
1104 GEP->setName(E.first->getName() + Twine(".reload.addr"));
1105 if (ByValTy)
1106 CurrentReload = GEP;
1107 else
1110 SpillAlignment, E.first->getName() + Twine(".reload"));
1111
1114
1115
1116
1117 if (F->getSubprogram()) {
1118 auto *CurDef = Def;
        while (DIs.empty() && DVRs.empty() && isa<LoadInst>(CurDef)) {
          auto *LdInst = cast<LoadInst>(CurDef);
1121
1122 if (LdInst->getPointerOperandType() != LdInst->getType())
1123 break;
1124 CurDef = LdInst->getPointerOperand();
1125 if (!isa<AllocaInst, LoadInst>(CurDef))
1126 break;
1129 }
1130 }
1131
1132 auto SalvageOne = [&](auto *DDI) {
1133 bool AllowUnresolved = false;
1134
1135
1136
1140 DDI->getExpression(), DDI->getDebugLoc(),
1141 DbgVariableRecord::LocationType::Declare);
1142 Builder.GetInsertPoint()->getParent()->insertDbgRecordBefore(
1144 } else {
1146 .insertDeclare(CurrentReload, DDI->getVariable(),
1147 DDI->getExpression(), DDI->getDebugLoc(),
1149 }
1150
1151
1153 };
1156 }
1157
1158
1159
1160
      if (auto *PN = dyn_cast<PHINode>(U)) {
1162 assert(PN->getNumIncomingValues() == 1 &&
1163 "unexpected number of incoming "
1164 "values in the PHINode");
1165 PN->replaceAllUsesWith(CurrentReload);
1166 PN->eraseFromParent();
1167 continue;
1168 }
1169
1170
1171
1172 U->replaceUsesOfWith(Def, CurrentReload);
1173
1174
1176 DVR.replaceVariableLocationOp(Def, CurrentReload, true);
1177 }
1178 }
1179
1181
1184 SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
1186
1187
1188 if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
1189 Shape.ABI == coro::ABI::Async) {
1190
1191 Builder.SetInsertPoint(SpillBlock, SpillBlock->begin());
1192 for (const auto &P : FrameData.Allocas) {
1194 auto *G = GetFramePointer(Alloca);
1195
1196
1197
1198 G->takeName(Alloca);
1201 }
1202 return;
1203 }
1204
1205
1206
1207
1208
1209
1213 for (const auto &A : FrameData.Allocas) {
1215 UsersToUpdate.clear();
1216 for (User *U : Alloca->users()) {
      auto *I = cast<Instruction>(U);
1220 }
1221 if (UsersToUpdate.empty())
1222 continue;
1223 auto *G = GetFramePointer(Alloca);
1224 G->setName(Alloca->getName() + Twine(".reload.addr"));
1225
1228 findDbgUsers(DIs, Alloca, &DbgVariableRecords);
1229 for (auto *DVI : DIs)
1230 DVI->replaceUsesOfWith(Alloca, G);
1231 for (auto *DVR : DbgVariableRecords)
1232 DVR->replaceVariableLocationOp(Alloca, G);
1233
1235
1236
1237
1238 if (I->isLifetimeStartOrEnd()) {
1239 I->eraseFromParent();
1240 continue;
1241 }
1242
1243 I->replaceUsesOfWith(Alloca, G);
1244 }
1245 }
1247 for (const auto &A : FrameData.Allocas) {
1249 if (A.MayWriteBeforeCoroBegin) {
1250
1253 "Coroutines cannot handle copying of array allocas yet");
1254
1255 auto *G = GetFramePointer(Alloca);
1258 }
1259
1260
1261
1262 for (const auto &Alias : A.Aliases) {
1263 auto *FramePtr = GetFramePointer(Alloca);
1264 auto &Value = *Alias.second;
1266 auto *AliasPtr =
1270 }
1271 }
1272
1273
1274
1275
1276
1279
1280 bool HasAccessingPromiseBeforeCB = llvm::any_of(PA->uses(), [&](Use &U) {
    auto *Inst = dyn_cast<Instruction>(U.getUser());
1282 if (!Inst || DT.dominates(Shape.CoroBegin, Inst))
1283 return false;
1284
    if (auto *CI = dyn_cast<CallBase>(Inst)) {
1286
1287
1288
1289
1290 if (CI->onlyReadsMemory() ||
1291 CI->onlyReadsMemory(CI->getArgOperandNo(&U)))
1292 return false;
1293 return true;
1294 }
1295
    return isa<StoreInst>(Inst) ||
1297
1298
           isa<GetElementPtrInst>(Inst) ||
1300
1301
1302
1303
           isa<BitCastInst>(Inst);
1305 });
1306 if (HasAccessingPromiseBeforeCB) {
1308 auto *G = GetFramePointer(PA);
1309 auto *Value = Builder.CreateLoad(PA->getAllocatedType(), PA);
1311 }
1312 }
1313}
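// Illustrative effect of insertSpills (IR names hypothetical): a value %x
// that crosses a suspend point is stored to its frame slot right after its
// definition and reloaded in each block that uses it after a resume:
//
//   %x.spill.addr = getelementptr inbounds %f.Frame, ptr %hdl, i32 0, i32 4
//   store i64 %x, ptr %x.spill.addr
//   ...
//   %x.reload.addr = getelementptr inbounds %f.Frame, ptr %hdl, i32 0, i32 4
//   %x.reload = load i64, ptr %x.reload.addr, align 8
//
// Allocas are not reloaded; their users are redirected to the frame GEP
// itself, and byval arguments are copied into the frame with a store.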
1314
1315
1316
1320 PHINode *UntilPHI = nullptr) {
  auto *PN = cast<PHINode>(&SuccBB->front());
1322 do {
1323 int Index = PN->getBasicBlockIndex(InsertedBB);
1324 Value *V = PN->getIncomingValue(Index);
1326 V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName());
1329 PN->setIncomingValue(Index, InputV);
    PN = dyn_cast<PHINode>(PN->getNextNode());
1331 } while (PN != UntilPHI);
1332}
1333
1334
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1369
1370
1371 auto *NewCleanupPadBB =
1373 CleanupPadBB->getName() + Twine(".corodispatch"),
1374 CleanupPadBB->getParent(), CleanupPadBB);
1376 auto *SwitchType = Builder.getInt8Ty();
1377 auto *SetDispatchValuePN =
1380 CleanupPad->insertAfter(SetDispatchValuePN);
1381 auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
1383
1384 int SwitchIndex = 0;
1387
1389 CleanupPadBB->getName() +
1390 Twine(".from.") + Pred->getName(),
1391 CleanupPadBB->getParent(), CleanupPadBB);
1393 CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
1394 Pred->getName());
1396 Builder.CreateBr(CleanupPadBB);
1398
1399
1400 setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);
1401
1402
1403 auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
1404 SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
1405 SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
1406 SwitchIndex++;
1407 }
1408}
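// Sketch of the rewrite above (labels and values hypothetical): a cleanuppad
// block with PHIs and several unwind predecessors
//
//   ehcleanup:                                ; preds = %a, %b
//     %v = phi i32 [ 0, %a ], [ 1, %b ]
//     %pad = cleanuppad within none []
//
// gains a dispatching pad plus one trivial block per predecessor:
//
//   ehcleanup.corodispatch:                   ; preds = %a, %b
//     %which = phi i8 [ 0, %a ], [ 1, %b ]
//     %pad = cleanuppad within none []
//     switch i8 %which, label %unreach [ i8 0, label %ehcleanup.from.a
//                                        i8 1, label %ehcleanup.from.b ]
//
// so every PHI in the original block ends up with exactly one incoming value
// per single-predecessor block and can be demoted safely later on.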
1409
1412 for (auto &BB : F) {
1413 for (auto &Phi : BB.phis()) {
1414 if (Phi.getNumIncomingValues() == 1) {
1416 } else
1417 break;
1418 }
1419 }
1420 while (!Worklist.empty()) {
1422 auto *OriginalValue = Phi->getIncomingValue(0);
1423 Phi->replaceAllUsesWith(OriginalValue);
1424 }
1425}
1426
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451 if (auto *CleanupPad =
          dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
    for (BasicBlock *Pred : Preds) {
      if (auto *CS =
              dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
1457
1458
1459 assert(CS->getUnwindDest() == &BB);
1460 (void)CS;
        rewritePHIsForCleanupPad(&BB, CleanupPad);
        return;
1463 }
1464 }
1465 }
1466
1468 PHINode *ReplPHI = nullptr;
  if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
1470
1471
1472
1475 ReplPHI->takeName(LandingPad);
1477
1478
1479 }
1480
1483 auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
1484 IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
1485
1486
1487
1489 }
1490
1491 if (LandingPad) {
1492
1493
1495 }
1496}
1497
1500
    if (auto *PN = dyn_cast<PHINode>(&BB.front()))
1503 if (PN->getNumIncomingValues() > 1)
1505
1508}
1509
1510
1511
1513 auto *BB = I->getParent();
1514 if (&BB->front() == I) {
1515 if (BB->getSinglePredecessor()) {
1516 BB->setName(Name);
1517 return BB;
1518 }
1519 }
1520 return BB->splitBasicBlock(I, Name);
1521}
1522
1523
1524
1528}
1529
1530
1531
1533 unsigned depth = 3) {
1534
1535
1536 if (depth == 0) return false;
1537
1538
1540 return true;
1541
1542
1545 return false;
1546 }
1547
1548
1549 return true;
1550}
1551
1553
1554
1555
1556 for (auto *U : AI->users()) {
1557 auto FI = dyn_cast(U);
1558 if (!FI) continue;
1559
1561 return true;
1562 }
1563
1564
1565 return false;
1566}
1567
1568
1569
1572 for (auto *AI : LocalAllocas) {
1574
1575
1576
1577 Value *StackSave = nullptr;
1580
1581
1584
1585 for (auto *U : AI->users()) {
1586
1587 if (isa(U)) {
1588 U->replaceAllUsesWith(Alloca);
1589
1590
1591
1592
1593 } else {
1594 auto FI = cast(U);
1595 if (StackSave) {
1598 }
1599 }
1600 DeadInsts.push_back(cast(U));
1601 }
1602
1604 }
1605}
1606
1607
1610
1611 auto FnTy = FunctionType::get(ValueTy, {}, false);
1613
1614 auto Call = Builder.CreateCall(FnTy, Fn, {});
1616
1617 return Call;
1618}
1619
1620
1621
1622
1625
1626 auto FnTy = FunctionType::get(Builder.getPtrTy(),
1627 {V->getType()}, false);
1629
1630 auto Call = Builder.CreateCall(FnTy, Fn, { V });
1632
1633 return Call;
1634}
1635
1636
1637
1638
1639
1640
1646
1647
1648
1649 auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
1651
1652
1653
1654
  if (isa<CallInst>(Call)) {
1657 } else {
    auto Invoke = cast<InvokeInst>(Call);
1659 Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
1660 }
1661
1662
1664 Builder.CreateStore(ValueAfterCall, Alloca);
1665
1666 return Addr;
1667}
1668
1669
1670
1674
1675
    if (isa<LoadInst>(User) || isa<StoreInst>(User))
1678 continue;
1679
    assert(isa<CallInst>(User) || isa<InvokeInst>(User));
    auto Call = cast<Instruction>(User);
1682
1684
1685
1687 }
1688
1689
1691}
1692
1693
1694
1695
1696
1700 IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
1701
  auto ArgTy = cast<PointerType>(Arg.getType());
1703 auto ValueTy = PointerType::getUnqual(F.getContext());
1704
1705
1706
1707
1708 auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
1710
1711
1713 Builder.CreateStore(InitialValue, Alloca);
1714
1715
1718 }
1719
1720
1723 auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
1725 }
1726
1727
1728 AllocasToPromote.push_back(Alloca);
1730}
1731
1732
1733
1736
1737
1738 for (auto &Arg : F.args()) {
1739 if (!Arg.hasSwiftErrorAttr()) continue;
1740
1742 break;
1743 }
1744
1745
1746 for (auto &Inst : F.getEntryBlock()) {
    auto Alloca = dyn_cast<AllocaInst>(&Inst);
1748 if (!Alloca || !Alloca->isSwiftError()) continue;
1749
1750
1752
1753 AllocasToPromote.push_back(Alloca);
1755 }
1756
1757
1758
1759 if (!AllocasToPromote.empty()) {
1762 }
1763}
1764
1765
1766
1767
1768
1772 if (F.hasOptNone())
1773 return;
1774
1775
1777 DomSet.insert(&F.getEntryBlock());
1782 "should have split coro.suspend into its own block");
1784 }
1785
    AllocaInst* AI = dyn_cast<AllocaInst>(&I);
1788 if (!AI)
1789 continue;
1790
1792 bool Valid = true;
1794
      if (auto* II = dyn_cast<IntrinsicInst>(I))
1797 return II->getIntrinsicID() == Intrinsic::lifetime_start;
1798 return false;
1799 };
1800
1804 return true;
1805 }
1806 if (!U->hasOneUse() || U->stripPointerCasts() != AI)
1807 return false;
1809 Lifetimes.push_back(U->user_back());
1810 return true;
1811 }
1812 return false;
1813 };
1814
1817
1818
1819
1820
1823
1824
1825 if (collectLifetimeStart(UI, AI))
1826 continue;
1827 Valid = false;
1828 break;
1829 }
1830 }
1831
1832
1833 if (Valid && Lifetimes.size() != 0) {
1834 auto *NewLifetime = Lifetimes[0]->clone();
1835 NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), AI);
1836 NewLifetime->insertBefore(DomBB->getTerminator());
1837
1838
1840 S->eraseFromParent();
1841
1842 break;
1843 }
1844 }
1845 }
1846}
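// Hedged example of the sinking above (IR names hypothetical): for
//
//   entry:
//     %tmp = alloca i32
//     call void @llvm.lifetime.start.p0(i64 4, ptr %tmp)
//   ...
//   await.ready:                   ; the only region that actually uses %tmp
//     ... uses of %tmp ...
//
// every user of %tmp is dominated by the successor of a single suspend
// point, so one cloned lifetime.start is inserted at that successor and the
// entry-block markers are erased; with the shortened lifetime the alloca no
// longer appears to live across the suspend and need not get a frame slot.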
1847
1848static std::optional<std::pair<Value &, DIExpression &>>
1851 DIExpression *Expr, bool SkipOutermostLoad) {
1853 auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
1854 while (isa(InsertPt))
1855 ++InsertPt;
1857
  while (auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
    if (auto *LdInst = dyn_cast<LoadInst>(Inst)) {
1860 Storage = LdInst->getPointerOperand();
1861
1862
1863
1864
1865
1866
1867 if (!SkipOutermostLoad)
    } else if (auto *StInst = dyn_cast<StoreInst>(Inst)) {
1870 Storage = StInst->getValueOperand();
1871 } else {
1876 AdditionalValues);
      if (!Op || !AdditionalValues.empty()) {
1878
1879
1880 break;
1881 }
1882 Storage = Op;
1884 }
1885 SkipOutermostLoad = false;
1886 }
1887 if (!Storage)
1888 return std::nullopt;
1889
  auto *StorageAsArg = dyn_cast<Argument>(Storage);
1891 const bool IsSwiftAsyncArg =
1892 StorageAsArg && StorageAsArg->hasAttribute(Attribute::SwiftAsync);
1893
1894
1895
1896
1897 if (IsSwiftAsyncArg && UseEntryValue && !Expr->isEntryValue() &&
1900
1901
1902
1903
1904
1905 if (StorageAsArg && !IsSwiftAsyncArg) {
1906 auto &Cached = ArgToAllocaMap[StorageAsArg];
1907 if (!Cached) {
1909 Storage->getName() + ".debug");
1911 }
1912 Storage = Cached;
1913
1914
1915
1916
1917
1918
1919
1921 }
1922
1924 return {{*Storage, *Expr}};
1925}
1926
1930
1932
1933
  bool SkipOutermostLoad = !isa<DbgValueInst>(DVI);
1936
1937 auto SalvagedInfo =
1940 if (!SalvagedInfo)
1941 return;
1942
1943 Value *Storage = &SalvagedInfo->first;
1945
1948
1949
1950
  if (isa<DbgDeclareInst>(DVI)) {
1952 std::optionalBasicBlock::iterator InsertPt;
    if (auto *I = dyn_cast<Instruction>(Storage)) {
1954 InsertPt = I->getInsertionPointAfterDef();
1955
1956 DebugLoc ILoc = I->getDebugLoc();
1958 if (ILoc && DVILoc &&
1959 DVILoc->getScope()->getSubprogram() ==
1960 ILoc->getScope()->getSubprogram())
    } else if (isa<Argument>(Storage))
1963 InsertPt = F->getEntryBlock().begin();
1964 if (InsertPt)
1965 DVI.moveBefore(*(*InsertPt)->getParent(), *InsertPt);
1966 }
1967}
1968
1972
1974
1975
1976 bool SkipOutermostLoad = DVR.isDbgDeclare();
1978
1979 auto SalvagedInfo =
1982 if (!SalvagedInfo)
1983 return;
1984
1985 Value *Storage = &SalvagedInfo->first;
1987
1990
1991
1992
1993 if (DVR.getType() == DbgVariableRecord::LocationType::Declare) {
1994 std::optionalBasicBlock::iterator InsertPt;
    if (auto *I = dyn_cast<Instruction>(Storage)) {
1996 InsertPt = I->getInsertionPointAfterDef();
1997
1998 DebugLoc ILoc = I->getDebugLoc();
2000 if (ILoc && DVRLoc &&
2001 DVRLoc->getScope()->getSubprogram() ==
2002 ILoc->getScope()->getSubprogram())
    } else if (isa<Argument>(Storage))
2005 InsertPt = F->getEntryBlock().begin();
2006 if (InsertPt) {
2008 (*InsertPt)->getParent()->insertDbgRecordBefore(&DVR, *InsertPt);
2009 }
2010 }
2011}
2012
2015
2018
2019 if (Shape.ABI == coro::ABI::Switch &&
2022 }
2023
2024
2025
2026
2028 if (auto *Save = CSI->getCoroSave())
2031 }
2032
2033
2036
2037
2038
2039
2040
    if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
2042 auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
2043 if (!MustTailCallFn)
2044 continue;
2049 AsyncEnd->getDebugLoc(), MustTailCallFn, TTI, Arguments, Builder);
2050 splitAround(Call, "MustTailCall.Before.CoroEnd");
2051 }
2052 }
2053
2054
2055
2057
2058
2059
2061}
2062
2066
2071
2072
2074
2076
2077
2082 LocalAllocas, F, Checker, DT, Shape);
2084
2087
2091
2092
2093 FrameDataInfo FrameData(Spills, Allocas);
2096
2098
2101
2102 for (auto *I : DeadInstructions)
2103 I->eraseFromParent();
2104}