LLVM: lib/CodeGen/AtomicExpandPass.cpp Source File
//===- AtomicExpandPass.cpp - Expand atomic instructions -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-Exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// __atomic_* library calls, or target specific instructions which implement
// the same semantics in a way which better fits the target backend.
//
//===----------------------------------------------------------------------===//
#include <cassert>
#include <cstdint>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "atomic-expand"

namespace {

class AtomicExpandImpl {
  const TargetLowering *TLI = nullptr;
  const DataLayout *DL = nullptr;

private:
  void handleFailure(Instruction &FailedInst, const Twine &Msg) const {
    LLVMContext &Ctx = FailedInst.getContext();
    Ctx.emitError(&FailedInst, Msg);

    if (!FailedInst.getType()->isVoidTy())
      FailedInst.replaceAllUsesWith(PoisonValue::get(FailedInst.getType()));
    FailedInst.eraseFromParent();
  }

  bool bracketInstWithFences(Instruction *I, AtomicOrdering Order);
  IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
  LoadInst *convertAtomicLoadToIntegerType(LoadInst *LI);
  bool tryExpandAtomicLoad(LoadInst *LI);
  bool expandAtomicLoadToLL(LoadInst *LI);
  bool expandAtomicLoadToCmpXchg(LoadInst *LI);
  StoreInst *convertAtomicStoreToIntegerType(StoreInst *SI);
  bool tryExpandAtomicStore(StoreInst *SI);
  void expandAtomicStoreToXChg(StoreInst *SI);
  bool tryExpandAtomicRMW(AtomicRMWInst *AI);
  AtomicRMWInst *convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI);
  void expandAtomicOpToLLSC(
      Instruction *I, Type *ResultType, Value *Addr, Align AddrAlign,
      AtomicOrdering MemOpOrder,
      function_ref<Value *(IRBuilderBase &, Value *)> PerformOp);
  void expandPartwordAtomicRMW(
      AtomicRMWInst *I, TargetLoweringBase::AtomicExpansionKind ExpansionKind);
  AtomicRMWInst *widenPartwordAtomicRMW(AtomicRMWInst *AI);
  bool expandPartwordCmpXchg(AtomicCmpXchgInst *CI);
  void expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI);
  void expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI);

  AtomicCmpXchgInst *convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI);
  static Value *insertRMWCmpXchgLoop(
      IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
      AtomicOrdering MemOpOrder, SyncScope::ID SSID,
      function_ref<Value *(IRBuilderBase &, Value *)> PerformOp,
      CreateCmpXchgInstFun CreateCmpXchg, Instruction *MetadataSrc);
  bool tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI);
  bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);

  bool isIdempotentRMW(AtomicRMWInst *RMWI);
  bool simplifyIdempotentRMW(AtomicRMWInst *RMWI);

  bool expandAtomicOpToLibcall(Instruction *I, unsigned Size, Align Alignment,
                               Value *PointerOperand, Value *ValueOperand,
                               Value *CASExpected, AtomicOrdering Ordering,
                               AtomicOrdering Ordering2,
                               ArrayRef<RTLIB::Libcall> Libcalls);
  void expandAtomicLoadToLibcall(LoadInst *LI);
  void expandAtomicStoreToLibcall(StoreInst *LI);
  void expandAtomicRMWToLibcall(AtomicRMWInst *I);
  void expandAtomicCASToLibcall(AtomicCmpXchgInst *I);

  friend bool
  llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                 CreateCmpXchgInstFun CreateCmpXchg);

  static Value *insertRMWLLSCLoop(
      IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
      AtomicOrdering MemOpOrder,
      function_ref<Value *(IRBuilderBase &, Value *)> PerformOp);

public:
  bool run(Function &F, const TargetMachine *TM);
};

class AtomicExpandLegacy : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  AtomicExpandLegacy() : FunctionPass(ID) {
    initializeAtomicExpandLegacyPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;
};

// IRBuilder to be used for replacement atomic instructions.
struct ReplacementIRBuilder
    : IRBuilder<InstSimplifyFolder, IRBuilderCallbackInserter> {
  MDNode *MMRAMD = nullptr;

  // Preserves the DebugLoc from I, copies still-valid metadata, and enables
  // StrictFP builder mode when appropriate.
  explicit ReplacementIRBuilder(Instruction *I, const DataLayout &DL)
      : IRBuilder(I->getContext(), InstSimplifyFolder(DL),
                  IRBuilderCallbackInserter(
                      [this](Instruction *I) { addMMRAMD(I); })) {
    SetInsertPoint(I);
    this->CollectMetadataToCopy(I, {LLVMContext::MD_pcsections});
    if (BB->getParent()->getAttributes().hasFnAttr(Attribute::StrictFP))
      this->setIsFPConstrained(true);

    MMRAMD = I->getMetadata(LLVMContext::MD_mmra);
  }

  void addMMRAMD(Instruction *I) {
    if (canInstructionHaveMMRAs(*I))
      I->setMetadata(LLVMContext::MD_mmra, MMRAMD);
  }
};

} // end anonymous namespace

char AtomicExpandLegacy::ID = 0;

char &llvm::AtomicExpandID = AtomicExpandLegacy::ID;

INITIALIZE_PASS_BEGIN(AtomicExpandLegacy, DEBUG_TYPE,
                      "Expand Atomic instructions", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(AtomicExpandLegacy, DEBUG_TYPE,
                    "Expand Atomic instructions", false, false)

// Helper functions to retrieve the size of atomic instructions.
static unsigned getAtomicOpSize(LoadInst *LI) {
  const DataLayout &DL = LI->getDataLayout();
  return DL.getTypeStoreSize(LI->getType());
}

static unsigned getAtomicOpSize(StoreInst *SI) {
  const DataLayout &DL = SI->getDataLayout();
  return DL.getTypeStoreSize(SI->getValueOperand()->getType());
}

static unsigned getAtomicOpSize(AtomicRMWInst *RMWI) {
  const DataLayout &DL = RMWI->getDataLayout();
  return DL.getTypeStoreSize(RMWI->getValOperand()->getType());
}

static unsigned getAtomicOpSize(AtomicCmpXchgInst *CASI) {
  const DataLayout &DL = CASI->getDataLayout();
  return DL.getTypeStoreSize(CASI->getCompareOperand()->getType());
}

/// Copy metadata that's safe to preserve when widening atomics.
static void copyMetadataForAtomic(Instruction &Dest,
                                  const Instruction &Source) {
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  Source.getAllMetadata(MD);
  LLVMContext &Ctx = Dest.getContext();

  for (auto [ID, N] : MD) {
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_noalias_addrspace:
    case LLVMContext::MD_access_group:
    case LLVMContext::MD_mmra:
      Dest.setMetadata(ID, N);
      break;
    default:
      if (ID == Ctx.getMDKindID("amdgpu.no.remote.memory"))
        Dest.setMetadata(ID, N);
      else if (ID == Ctx.getMDKindID("amdgpu.no.fine.grained.memory"))
        Dest.setMetadata(ID, N);
      break;
    }
  }
}

// Determine if a particular atomic operation has a supported size,
// and is of appropriate alignment, to be passed through for target
// lowering. (Versus turning into a __atomic libcall.)
template <typename Inst>
static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
  unsigned Size = getAtomicOpSize(I);
  Align Alignment = I->getAlign();
  return Alignment >= Size &&
         Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8;
}

bool AtomicExpandImpl::processAtomicInstr(Instruction *I) {
  auto *LI = dyn_cast<LoadInst>(I);
  auto *SI = dyn_cast<StoreInst>(I);
  auto *RMWI = dyn_cast<AtomicRMWInst>(I);
  auto *CASI = dyn_cast<AtomicCmpXchgInst>(I);

  bool MadeChange = false;

  // If the size/alignment is not supported, replace with a libcall.
  if (LI) {
    if (!LI->isAtomic())
      return false;

    if (!atomicSizeSupported(TLI, LI)) {
      expandAtomicLoadToLibcall(LI);
      return true;
    }

    if (TLI->shouldCastAtomicLoadInIR(LI) ==
        TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
      I = LI = convertAtomicLoadToIntegerType(LI);
      MadeChange = true;
    }
  } else if (SI) {
    if (!SI->isAtomic())
      return false;

    if (!atomicSizeSupported(TLI, SI)) {
      expandAtomicStoreToLibcall(SI);
      return true;
    }

    if (TLI->shouldCastAtomicStoreInIR(SI) ==
        TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
      I = SI = convertAtomicStoreToIntegerType(SI);
      MadeChange = true;
    }
  } else if (RMWI) {
    if (!atomicSizeSupported(TLI, RMWI)) {
      expandAtomicRMWToLibcall(RMWI);
      return true;
    }

    if (TLI->shouldCastAtomicRMWIInIR(RMWI) ==
        TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
      I = RMWI = convertAtomicXchgToIntegerType(RMWI);
      MadeChange = true;
    }
  } else if (CASI) {
    if (!atomicSizeSupported(TLI, CASI)) {
      expandAtomicCASToLibcall(CASI);
      return true;
    }

    // TODO: when we're ready to make the change at the IR level, we can
    // extend convertCmpXchgToInteger for floating point too.
    if (CASI->getCompareOperand()->getType()->isPointerTy()) {
      // TODO: add a TLI hook to control this so that each target can
      // convert to lowering the original type one at a time.
      I = CASI = convertCmpXchgToIntegerType(CASI);
      MadeChange = true;
    }
  } else
    return false;

  if (TLI->shouldInsertFencesForAtomic(I)) {
    auto FenceOrdering = AtomicOrdering::Monotonic;
    if (LI && isAcquireOrStronger(LI->getOrdering())) {
      FenceOrdering = LI->getOrdering();
      LI->setOrdering(AtomicOrdering::Monotonic);
    } else if (SI && isReleaseOrStronger(SI->getOrdering())) {
      FenceOrdering = SI->getOrdering();
      SI->setOrdering(AtomicOrdering::Monotonic);
    } else if (RMWI && (isReleaseOrStronger(RMWI->getOrdering()) ||
                        isAcquireOrStronger(RMWI->getOrdering()))) {
      FenceOrdering = RMWI->getOrdering();
      RMWI->setOrdering(AtomicOrdering::Monotonic);
    } else if (CASI &&
               TLI->shouldExpandAtomicCmpXchgInIR(CASI) ==
                   TargetLoweringBase::AtomicExpansionKind::None &&
               (isReleaseOrStronger(CASI->getSuccessOrdering()) ||
                isAcquireOrStronger(CASI->getSuccessOrdering()) ||
                isAcquireOrStronger(CASI->getFailureOrdering()))) {
      // If a compare and swap is lowered to LL/SC, we can do smarter fence
      // insertion, with a stronger one on the success path than on the
      // failure path. As a result, fence insertion is directly done by
      // expandAtomicCmpXchg in that case.
      FenceOrdering = CASI->getMergedOrdering();
      auto CASOrdering = TLI->atomicOperationOrderAfterFenceSplit(CASI);

      CASI->setSuccessOrdering(CASOrdering);
      CASI->setFailureOrdering(CASOrdering);
    }

    if (FenceOrdering != AtomicOrdering::Monotonic) {
      MadeChange |= bracketInstWithFences(I, FenceOrdering);
    }
  } else if (I->hasAtomicStore() &&
             TLI->shouldInsertTrailingFenceForAtomicStore(I)) {
    auto FenceOrdering = AtomicOrdering::Monotonic;
    if (SI)
      FenceOrdering = SI->getOrdering();
    else if (RMWI)
      FenceOrdering = RMWI->getOrdering();
    else if (CASI && TLI->shouldExpandAtomicCmpXchgInIR(CASI) !=
                         TargetLoweringBase::AtomicExpansionKind::LLSC)
      // LLSC is handled in expandAtomicCmpXchg().
      FenceOrdering = CASI->getSuccessOrdering();

    ReplacementIRBuilder Builder(I, *DL);
    if (auto TrailingFence =
            TLI->emitTrailingFence(Builder, I, FenceOrdering)) {
      TrailingFence->moveAfter(I);
      MadeChange = true;
    }
  }

  if (LI)
    MadeChange |= tryExpandAtomicLoad(LI);
  else if (SI)
    MadeChange |= tryExpandAtomicStore(SI);
  else if (RMWI) {
    // There are two different ways of expanding RMW instructions:
    // - into a load if it is idempotent
    // - into a Cmpxchg/LL-SC loop otherwise
    // we try them in that order.
    if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
      MadeChange = true;
    } else {
      MadeChange |= tryExpandAtomicRMW(RMWI);
    }
  } else if (CASI)
    MadeChange |= tryExpandAtomicCmpXchg(CASI);

  return MadeChange;
}

bool AtomicExpandImpl::run(Function &F, const TargetMachine *TM) {
  const auto *Subtarget = TM->getSubtargetImpl(F);
  if (!Subtarget->enableAtomicExpand())
    return false;
  TLI = Subtarget->getTargetLowering();
  DL = &F.getDataLayout();

  bool MadeChange = false;

  for (Function::iterator BBI = F.begin(), BBE = F.end(); BBI != BBE; ++BBI) {
    BasicBlock *BB = &*BBI;

    for (Instruction &Inst : make_early_inc_range(*BB)) {
      if (processAtomicInstr(&Inst)) {
        MadeChange = true;

        // Expansion may have split blocks; refresh the function end iterator.
        BBE = F.end();
      }
    }
  }

  return MadeChange;
}

bool AtomicExpandLegacy::runOnFunction(Function &F) {
  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;
  auto *TM = &TPC->getTM<TargetMachine>();
  AtomicExpandImpl AE;
  return AE.run(F, TM);
}

FunctionPass *llvm::createAtomicExpandLegacyPass() {
  return new AtomicExpandLegacy();
}

PreservedAnalyses AtomicExpandPass::run(Function &F,
                                        FunctionAnalysisManager &AM) {
  AtomicExpandImpl AE;

  bool Changed = AE.run(F, TM);
  if (!Changed)
    return PreservedAnalyses::all();

  return PreservedAnalyses::none();
}

bool AtomicExpandImpl::bracketInstWithFences(Instruction *I,
                                             AtomicOrdering Order) {
  ReplacementIRBuilder Builder(I, *DL);

  auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order);

  auto TrailingFence = TLI->emitTrailingFence(Builder, I, Order);
  // We have a guard here because not every atomic operation generates a
  // trailing fence.
  if (TrailingFence)
    TrailingFence->moveAfter(I);

  return (LeadingFence || TrailingFence);
}
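
// Illustrative sketch (not from the upstream source; the exact fences are
// target hooks): on a target whose shouldInsertFencesForAtomic() returns
// true, a seq_cst store such as
//   store atomic i32 %v, ptr %p seq_cst, align 4
// is first downgraded to monotonic by processAtomicInstr(), and this
// function then brackets it, conceptually yielding:
//   fence seq_cst                                  ; emitLeadingFence()
//   store atomic i32 %v, ptr %p monotonic, align 4
//   fence seq_cst                                  ; emitTrailingFence(),
//                                                  ; moved after the store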

/// Get the iX type with the same bitwidth as T.
IntegerType *
AtomicExpandImpl::getCorrespondingIntegerType(Type *T, const DataLayout &DL) {
  EVT VT = TLI->getMemValueType(DL, T);
  unsigned BitWidth = VT.getStoreSizeInBits();
  assert(BitWidth == VT.getSizeInBits() && "must be a power of two");
  return IntegerType::get(T->getContext(), BitWidth);
}

/// Convert an atomic load of a non-integral type to an integer load of the
/// equivalent bitwidth. See the function comment on
/// convertAtomicStoreToIntegerType for background.
LoadInst *AtomicExpandImpl::convertAtomicLoadToIntegerType(LoadInst *LI) {
  auto *M = LI->getModule();
  Type *NewTy = getCorrespondingIntegerType(LI->getType(), M->getDataLayout());

  ReplacementIRBuilder Builder(LI, *DL);

  Value *Addr = LI->getPointerOperand();

  auto *NewLI = Builder.CreateLoad(NewTy, Addr);
  NewLI->setAlignment(LI->getAlign());
  NewLI->setVolatile(LI->isVolatile());
  NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");

  Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());
  LI->replaceAllUsesWith(NewVal);
  LI->eraseFromParent();
  return NewLI;
}

AtomicRMWInst *
AtomicExpandImpl::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
  auto *M = RMWI->getModule();
  Type *NewTy =
      getCorrespondingIntegerType(RMWI->getType(), M->getDataLayout());

  ReplacementIRBuilder Builder(RMWI, *DL);

  Value *Addr = RMWI->getPointerOperand();
  Value *Val = RMWI->getValOperand();
  Value *NewVal = Val->getType()->isPointerTy()
                      ? Builder.CreatePtrToInt(Val, NewTy)
                      : Builder.CreateBitCast(Val, NewTy);

  auto *NewRMWI = Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, Addr, NewVal,
                                          RMWI->getAlign(), RMWI->getOrdering(),
                                          RMWI->getSyncScopeID());
  NewRMWI->setVolatile(RMWI->isVolatile());
  LLVM_DEBUG(dbgs() << "Replaced " << *RMWI << " with " << *NewRMWI << "\n");

  Value *NewRVal = RMWI->getType()->isPointerTy()
                       ? Builder.CreateIntToPtr(NewRMWI, RMWI->getType())
                       : Builder.CreateBitCast(NewRMWI, RMWI->getType());
  RMWI->replaceAllUsesWith(NewRVal);
  RMWI->eraseFromParent();
  return NewRMWI;
}

bool AtomicExpandImpl::tryExpandAtomicLoad(LoadInst *LI) {
  switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    expandAtomicOpToLLSC(
        LI, LI->getType(), LI->getPointerOperand(), LI->getAlign(),
        LI->getOrdering(),
        [](IRBuilderBase &Builder, Value *Loaded) { return Loaded; });
    return true;
  case TargetLoweringBase::AtomicExpansionKind::LLOnly:
    return expandAtomicLoadToLL(LI);
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicLoadToCmpXchg(LI);
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    LI->setAtomic(AtomicOrdering::NotAtomic);
    return true;
  case TargetLoweringBase::AtomicExpansionKind::CustomExpand:
    TLI->emitExpandAtomicLoad(LI);
    return true;
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
  }
}

bool AtomicExpandImpl::tryExpandAtomicStore(StoreInst *SI) {
  switch (TLI->shouldExpandAtomicStoreInIR(SI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::CustomExpand:
    TLI->emitExpandAtomicStore(SI);
    return true;
  case TargetLoweringBase::AtomicExpansionKind::Expand:
    expandAtomicStoreToXChg(SI);
    return true;
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    SI->setAtomic(AtomicOrdering::NotAtomic);
    return true;
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicStore");
  }
}

bool AtomicExpandImpl::expandAtomicLoadToLL(LoadInst *LI) {
  ReplacementIRBuilder Builder(LI, *DL);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads. For example, the only 64-bit load guaranteed
  // to be single-copy atomic by ARM is an ldrexd (A3.5.3).
  Value *Val = TLI->emitLoadLinked(Builder, LI->getType(),
                                   LI->getPointerOperand(), LI->getOrdering());
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}

bool AtomicExpandImpl::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  ReplacementIRBuilder Builder(LI, *DL);
  AtomicOrdering Order = LI->getOrdering();
  if (Order == AtomicOrdering::Unordered)
    Order = AtomicOrdering::Monotonic;

  Value *Addr = LI->getPointerOperand();
  Type *Ty = LI->getType();
  Constant *DummyVal = Constant::getNullValue(Ty);

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, LI->getAlign(), Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}
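
// Illustrative sketch (not from the upstream source): an acquire load the
// target cannot perform natively, e.g.
//   %v = load atomic i64, ptr %p acquire, align 8
// becomes a compare-exchange with a dummy (zero) expected/new value; a
// failed cmpxchg still returns the current memory contents:
//   %pair = cmpxchg ptr %p, i64 0, i64 0 acquire acquire
//   %v    = extractvalue { i64, i1 } %pair, 0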

/// Convert an atomic store of a non-integral type to an integer store of the
/// equivalent bitwidth. We used to not support floating point or vector
/// atomics in the IR at all. The backends learned to deal with the bitcast
/// idiom because that was the only way of expressing the notion of an atomic
/// float or vector store. The long term plan is to teach each backend to
/// instruction select from the original atomic store, but as a migration
/// mechanism, we convert back to the old format which the backends understand.
/// Each backend will need individual work to recognize the new format.
StoreInst *AtomicExpandImpl::convertAtomicStoreToIntegerType(StoreInst *SI) {
  ReplacementIRBuilder Builder(SI, *DL);
  auto *M = SI->getModule();
  Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
                                            M->getDataLayout());
  Value *NewVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);

  Value *Addr = SI->getPointerOperand();

  StoreInst *NewSI = Builder.CreateStore(NewVal, Addr);
  NewSI->setAlignment(SI->getAlign());
  NewSI->setVolatile(SI->isVolatile());
  NewSI->setAtomic(SI->getOrdering(), SI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
  SI->eraseFromParent();
  return NewSI;
}

void AtomicExpandImpl::expandAtomicStoreToXChg(StoreInst *SI) {
  // This function is only called on atomic stores that are too large to be
  // atomic if implemented as a native store. So we replace them by an
  // atomic swap, that can be implemented for example as a ldrex/strex on ARM
  // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
  // It is the responsibility of the target to only signal expansion via
  // shouldExpandAtomicRMW in cases where this is required and possible.
  ReplacementIRBuilder Builder(SI, *DL);
  AtomicOrdering Ordering = SI->getOrdering();
  assert(Ordering != AtomicOrdering::NotAtomic);
  AtomicOrdering RMWOrdering = Ordering == AtomicOrdering::Unordered
                                   ? AtomicOrdering::Monotonic
                                   : Ordering;
  AtomicRMWInst *AI = Builder.CreateAtomicRMW(
      AtomicRMWInst::Xchg, SI->getPointerOperand(), SI->getValueOperand(),
      SI->getAlign(), RMWOrdering);
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  tryExpandAtomicRMW(AI);
}
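
// Illustrative sketch (not from the upstream source): a release store that is
// too wide for a native atomic store, e.g.
//   store atomic i64 %v, ptr %p release, align 8
// is rewritten to an exchange whose result is unused, and the xchg is then
// expanded like any other atomicrmw:
//   atomicrmw xchg ptr %p, i64 %v release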

static void createCmpXchgInstFun(IRBuilderBase &Builder, Value *Addr,
                                 Value *Loaded, Value *NewVal, Align AddrAlign,
                                 AtomicOrdering MemOpOrder, SyncScope::ID SSID,
                                 Value *&Success, Value *&NewLoaded,
                                 Instruction *MetadataSrc) {
  Type *OrigTy = NewVal->getType();

  // This code can go away when cmpxchg supports FP and vector types.
  assert(!OrigTy->isPointerTy());
  bool NeedBitcast = OrigTy->isFloatingPointTy() || OrigTy->isVectorTy();
  if (NeedBitcast) {
    IntegerType *IntTy = Builder.getIntNTy(OrigTy->getPrimitiveSizeInBits());
    NewVal = Builder.CreateBitCast(NewVal, IntTy);
    Loaded = Builder.CreateBitCast(Loaded, IntTy);
  }

  AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, AddrAlign, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder), SSID);
  if (MetadataSrc)
    copyMetadataForAtomic(*Pair, *MetadataSrc);

  Success = Builder.CreateExtractValue(Pair, 1, "success");
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");

  if (NeedBitcast)
    NewLoaded = Builder.CreateBitCast(NewLoaded, OrigTy);
}

bool AtomicExpandImpl::tryExpandAtomicRMW(AtomicRMWInst *AI) {
  LLVMContext &Ctx = AI->getModule()->getContext();
  TargetLoweringBase::AtomicExpansionKind Kind =
      TLI->shouldExpandAtomicRMWInIR(AI);
  switch (Kind) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      expandPartwordAtomicRMW(AI,
                              TargetLoweringBase::AtomicExpansionKind::LLSC);
    } else {
      auto PerformOp = [&](IRBuilderBase &Builder, Value *Loaded) {
        return buildAtomicRMWValue(AI->getOperation(), Builder, Loaded,
                                   AI->getValOperand());
      };
      expandAtomicOpToLLSC(AI, AI->getType(), AI->getPointerOperand(),
                           AI->getAlign(), AI->getOrdering(), PerformOp);
    }
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      expandPartwordAtomicRMW(AI,
                              TargetLoweringBase::AtomicExpansionKind::CmpXChg);
    } else {
      SmallVector<StringRef> SSNs;
      Ctx.getSyncScopeNames(SSNs);
      auto MemScope = SSNs[AI->getSyncScopeID()].empty()
                          ? "system"
                          : SSNs[AI->getSyncScopeID()];
      OptimizationRemarkEmitter ORE(AI->getFunction());
      ORE.emit([&]() {
        return OptimizationRemark(DEBUG_TYPE, "Passed", AI)
               << "A compare and swap loop was generated for an atomic "
               << AI->getOperationName(AI->getOperation()) << " operation at "
               << MemScope << " memory scope";
      });
      expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun);
    }
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      AtomicRMWInst::BinOp Op = AI->getOperation();
      // Widen And/Or/Xor and give the target another chance at expanding the
      // operation with a wider type.
      if (Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
          Op == AtomicRMWInst::And) {
        tryExpandAtomicRMW(widenPartwordAtomicRMW(AI));
        return true;
      }
    }
    expandAtomicRMWToMaskedIntrinsic(AI);
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::BitTestIntrinsic: {
    TLI->emitBitTestAtomicRMWIntrinsic(AI);
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::CmpArithIntrinsic: {
    TLI->emitCmpArithAtomicRMWIntrinsic(AI);
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    return lowerAtomicRMWInst(AI);
  case TargetLoweringBase::AtomicExpansionKind::CustomExpand:
    TLI->emitExpandAtomicRMW(AI);
    return true;
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
  }
}

namespace {

struct PartwordMaskValues {
  // These three fields are guaranteed to be set by createMaskInstrs.
  Type *WordType = nullptr;
  Type *ValueType = nullptr;
  Type *IntValueType = nullptr;
  Value *AlignedAddr = nullptr;
  Align AlignedAddrAlignment;
  // The remaining fields can be null.
  Value *ShiftAmt = nullptr;
  Value *Mask = nullptr;
  Value *Inv_Mask = nullptr;
};

[[maybe_unused]]
raw_ostream &operator<<(raw_ostream &O, const PartwordMaskValues &PMV) {
  auto PrintObj = [&O](auto *V) {
    if (V)
      O << *V;
    else
      O << "nullptr";
    O << '\n';
  };
  O << "PartwordMaskValues {\n";
  O << "  WordType: ";
  PrintObj(PMV.WordType);
  O << "  ValueType: ";
  PrintObj(PMV.ValueType);
  O << "  AlignedAddr: ";
  PrintObj(PMV.AlignedAddr);
  O << "  AlignedAddrAlignment: " << PMV.AlignedAddrAlignment.value() << '\n';
  O << "  ShiftAmt: ";
  PrintObj(PMV.ShiftAmt);
  O << "  Mask: ";
  PrintObj(PMV.Mask);
  O << "  Inv_Mask: ";
  PrintObj(PMV.Inv_Mask);
  O << "}\n";
  return O;
}

} // end anonymous namespace

/// This is a helper function which builds instructions to provide
/// values necessary for partword atomic operations. It takes an
/// incoming address, Addr, and ValueType, and constructs the address,
/// shift-amounts and masks needed to work with a larger value of the same
/// bit-width.
///
/// AlignedAddr: Addr rounded down to a multiple of WordSize
///
/// ShiftAmt: Number of bits to right-shift a WordSize value loaded
///           from AlignedAddr for it to have the same value as if
///           ValueType was loaded from Addr.
///
/// Mask: Value to mask with the value loaded from AlignedAddr to
///       include only the part that would've been loaded from Addr.
///
/// Inv_Mask: The inverse of Mask.
static PartwordMaskValues createMaskInstrs(IRBuilderBase &Builder,
                                           Instruction *I, Type *ValueType,
                                           Value *Addr, Align AddrAlign,
                                           unsigned MinWordSize) {
  PartwordMaskValues PMV;

  Module *M = I->getModule();
  LLVMContext &Ctx = M->getContext();
  const DataLayout &DL = M->getDataLayout();
  unsigned ValueSize = DL.getTypeStoreSize(ValueType);

  PMV.ValueType = PMV.IntValueType = ValueType;
  if (PMV.ValueType->isFloatingPointTy() || PMV.ValueType->isVectorTy())
    PMV.IntValueType =
        Type::getIntNTy(Ctx, ValueType->getPrimitiveSizeInBits());

  PMV.WordType = MinWordSize > ValueSize ? Type::getIntNTy(Ctx, MinWordSize * 8)
                                         : ValueType;
  if (PMV.ValueType == PMV.WordType) {
    PMV.AlignedAddr = Addr;
    PMV.AlignedAddrAlignment = AddrAlign;
    PMV.ShiftAmt = ConstantInt::get(PMV.ValueType, 0);
    PMV.Mask = ConstantInt::get(PMV.ValueType, ~0, /*isSigned*/ true);
    return PMV;
  }

  PMV.AlignedAddrAlignment = Align(MinWordSize);

  assert(ValueSize < MinWordSize);

  PointerType *PtrTy = cast<PointerType>(Addr->getType());
  IntegerType *IntTy = DL.getIndexType(Ctx, PtrTy->getAddressSpace());
  Value *PtrLSB;

  if (AddrAlign < MinWordSize) {
    PMV.AlignedAddr = Builder.CreateIntrinsic(
        Intrinsic::ptrmask, {PtrTy, IntTy},
        {Addr, ConstantInt::get(IntTy, ~(uint64_t)(MinWordSize - 1))}, nullptr,
        "AlignedAddr");

    Value *AddrInt = Builder.CreatePtrToInt(Addr, IntTy);
    PtrLSB = Builder.CreateAnd(AddrInt, MinWordSize - 1, "PtrLSB");
  } else {
    // If the alignment is high enough, the low bits are known zero.
    PMV.AlignedAddr = Addr;
    PtrLSB = Constant::getNullValue(IntTy);
  }

  if (DL.isLittleEndian()) {
    // turn bytes to bits
    PMV.ShiftAmt = Builder.CreateShl(PtrLSB, 3);
  } else {
    // turn bytes to bits
    PMV.ShiftAmt = Builder.CreateShl(
        Builder.CreateXor(PtrLSB, MinWordSize - ValueSize), 3);
  }

  PMV.ShiftAmt = Builder.CreateTrunc(PMV.ShiftAmt, PMV.WordType, "ShiftAmt");
  PMV.Mask = Builder.CreateShl(
      ConstantInt::get(PMV.WordType, (1 << (ValueSize * 8)) - 1), PMV.ShiftAmt,
      "Mask");

  PMV.Inv_Mask = Builder.CreateNot(PMV.Mask, "Inv_Mask");

  return PMV;
}
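
// Worked example (hypothetical values, not from the upstream source): for an
// i16 access at address A = 0x1006 on a little-endian target with
// MinWordSize = 4, the values computed above are, conceptually:
//   AlignedAddr = A & ~3        = 0x1004
//   PtrLSB      = A & 3         = 2
//   ShiftAmt    = PtrLSB * 8    = 16
//   Mask        = 0xFFFF << 16  = 0xFFFF0000
//   Inv_Mask    = ~Mask         = 0x0000FFFF
// On a big-endian target the shift is instead (PtrLSB ^ (4 - 2)) * 8 = 0,
// since the i16 then occupies the low-order bits of the i32 word.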

static Value *extractMaskedValue(IRBuilderBase &Builder, Value *WideWord,
                                 const PartwordMaskValues &PMV) {
  assert(WideWord->getType() == PMV.WordType && "Widened type mismatch");
  if (PMV.WordType == PMV.ValueType)
    return WideWord;

  Value *Shift = Builder.CreateLShr(WideWord, PMV.ShiftAmt, "shifted");
  Value *Trunc = Builder.CreateTrunc(Shift, PMV.IntValueType, "extracted");
  return Builder.CreateBitCast(Trunc, PMV.ValueType);
}

static Value *insertMaskedValue(IRBuilderBase &Builder, Value *WideWord,
                                Value *Updated, const PartwordMaskValues &PMV) {
  assert(WideWord->getType() == PMV.WordType && "Widened type mismatch");
  assert(Updated->getType() == PMV.ValueType && "Value type mismatch");
  if (PMV.WordType == PMV.ValueType)
    return Updated;

  Updated = Builder.CreateBitCast(Updated, PMV.IntValueType);

  Value *ZExt = Builder.CreateZExt(Updated, PMV.WordType, "extended");
  Value *Shift =
      Builder.CreateShl(ZExt, PMV.ShiftAmt, "shifted", /*HasNUW*/ true);
  Value *And = Builder.CreateAnd(WideWord, PMV.Inv_Mask, "unmasked");
  Value *Or = Builder.CreateOr(And, Shift, "inserted");
  return Or;
}

/// Emit IR to implement a masked version of a given atomicrmw
/// operation. (That is, only the bits under the Mask should be
/// affected by the operation.)
static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
                                    IRBuilderBase &Builder, Value *Loaded,
                                    Value *Shifted_Inc, Value *Inc,
                                    const PartwordMaskValues &PMV) {
  switch (Op) {
  case AtomicRMWInst::Xchg: {
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, Shifted_Inc);
    return FinalVal;
  }
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::And:
    llvm_unreachable("Or/Xor/And handled by widenPartwordAtomicRMW");
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Nand: {
    // The other arithmetic ops need to be masked into place.
    Value *NewVal = buildAtomicRMWValue(Op, Builder, Loaded, Shifted_Inc);
    Value *NewVal_Masked = Builder.CreateAnd(NewVal, PMV.Mask);
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Masked);
    return FinalVal;
  }
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
  case AtomicRMWInst::FAdd:
  case AtomicRMWInst::FSub:
  case AtomicRMWInst::FMin:
  case AtomicRMWInst::FMax:
  case AtomicRMWInst::FMinimum:
  case AtomicRMWInst::FMaximum:
  case AtomicRMWInst::UIncWrap:
  case AtomicRMWInst::UDecWrap:
  case AtomicRMWInst::USubCond:
  case AtomicRMWInst::USubSat: {
    // Finally, other ops will operate on the full value, so truncate down to
    // the original size, and expand out again after doing the operation.
    // Bitcasts will be inserted for FP values.
    Value *Loaded_Extract = extractMaskedValue(Builder, Loaded, PMV);
    Value *NewVal = buildAtomicRMWValue(Op, Builder, Loaded_Extract, Inc);
    Value *FinalVal = insertMaskedValue(Builder, Loaded, NewVal, PMV);
    return FinalVal;
  }
  default:
    llvm_unreachable("Unknown atomic op");
  }
}
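
// Worked example (hypothetical values, not from the upstream source): for
// 'atomicrmw add ptr %p, i8 %inc' where %p lies at byte offset 1 of its
// little-endian i32 word, Shifted_Inc is '%inc << 8' and the Add case above
// computes, conceptually:
//   %new   = add i32 %loaded, %shifted_inc  ; carries may pollute bit 16+
//   %new_m = and i32 %new, 0x0000FF00       ; keep only the i8's lane
//   %rest  = and i32 %loaded, 0xFFFF00FF    ; untouched bytes of the word
//   %final = or i32 %rest, %new_m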

/// Expand a sub-word atomicrmw operation into an appropriate
/// word-sized operation.
///
/// It will create an LL/SC or cmpxchg loop, as appropriate, the same
/// way as a typical atomicrmw expansion. The only difference here is
/// that the operation inside of the loop may operate upon only a
/// part of the value.
void AtomicExpandImpl::expandPartwordAtomicRMW(
    AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) {
  // Widen And/Or/Xor and give the target another chance at expanding the
  // operation with a wider type.
  AtomicRMWInst::BinOp Op = AI->getOperation();
  if (Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
      Op == AtomicRMWInst::And) {
    tryExpandAtomicRMW(widenPartwordAtomicRMW(AI));
    return;
  }
  AtomicOrdering MemOpOrder = AI->getOrdering();
  SyncScope::ID SSID = AI->getSyncScopeID();

  ReplacementIRBuilder Builder(AI, *DL);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted = nullptr;
  if (Op == AtomicRMWInst::Xchg || Op == AtomicRMWInst::Add ||
      Op == AtomicRMWInst::Sub || Op == AtomicRMWInst::Nand) {
    Value *ValOp = Builder.CreateBitCast(AI->getValOperand(), PMV.IntValueType);
    ValOperand_Shifted =
        Builder.CreateShl(Builder.CreateZExt(ValOp, PMV.WordType), PMV.ShiftAmt,
                          "ValOperand_Shifted");
  }

  auto PerformPartwordOp = [&](IRBuilderBase &Builder, Value *Loaded) {
    return performMaskedAtomicOp(Op, Builder, Loaded, ValOperand_Shifted,
                                 AI->getValOperand(), PMV);
  };

  Value *OldResult;
  if (ExpansionKind == TargetLoweringBase::AtomicExpansionKind::CmpXChg) {
    OldResult = insertRMWCmpXchgLoop(
        Builder, PMV.WordType, PMV.AlignedAddr, PMV.AlignedAddrAlignment,
        MemOpOrder, SSID, PerformPartwordOp, createCmpXchgInstFun, AI);
  } else {
    assert(ExpansionKind == TargetLoweringBase::AtomicExpansionKind::LLSC);
    OldResult = insertRMWLLSCLoop(Builder, PMV.WordType, PMV.AlignedAddr,
                                  PMV.AlignedAddrAlignment, MemOpOrder,
                                  PerformPartwordOp);
  }

  Value *FinalOldResult = extractMaskedValue(Builder, OldResult, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
}

// Widen a bitwise partword atomicrmw (or/xor/and) up to the minimum
// cmpxchg-able word size, so that it needs no masking loop at all.
AtomicRMWInst *AtomicExpandImpl::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
  ReplacementIRBuilder Builder(AI, *DL);
  AtomicRMWInst::BinOp Op = AI->getOperation();

  assert((Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
          Op == AtomicRMWInst::And) &&
         "Unable to widen operation");

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted =
      Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
                        PMV.ShiftAmt, "ValOperand_Shifted");

  Value *NewOperand;

  if (Op == AtomicRMWInst::And)
    NewOperand =
        Builder.CreateOr(ValOperand_Shifted, PMV.Inv_Mask, "AndOperand");
  else
    NewOperand = ValOperand_Shifted;

  AtomicRMWInst *NewAI = Builder.CreateAtomicRMW(
      Op, PMV.AlignedAddr, NewOperand, PMV.AlignedAddrAlignment,
      AI->getOrdering(), AI->getSyncScopeID());

  copyMetadataForAtomic(*NewAI, *AI);

  Value *FinalOldResult = extractMaskedValue(Builder, NewAI, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
  return NewAI;
}
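
// Illustrative sketch (not from the upstream source): widening
//   atomicrmw and ptr %p, i8 %v seq_cst
// at byte offset 0 of a 4-byte word turns the operand into
//   %wide = (zext i8 %v to i32) | 0xFFFFFF00  ; Inv_Mask keeps other bytes
//   %old  = atomicrmw and ptr %aligned, i32 %wide seq_cst
// whereas 'or' and 'xor' can use the zero-extended, shifted operand directly,
// since the extra zero bits already leave the neighboring bytes unchanged.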

bool AtomicExpandImpl::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
  // The basic idea here is that we're expanding a cmpxchg of a
  // smaller memory size up to a word-sized cmpxchg. To do this, we
  // need to add a retry-loop for strong cmpxchg, so that
  // modifications to other parts of the word don't cause a spurious
  // failure.
  //
  // This generates code like the following:
  //     [[Setup mask values PMV.*]]
  //     %NewVal_Shifted = shl i32 %NewVal, %PMV.ShiftAmt
  //     %Cmp_Shifted = shl i32 %Cmp, %PMV.ShiftAmt
  //     %InitLoaded = load i32* %addr
  //     %InitLoaded_MaskOut = and i32 %InitLoaded, %PMV.Inv_Mask
  //     br partword.cmpxchg.loop
  // partword.cmpxchg.loop:
  //     %Loaded_MaskOut = phi i32 [ %InitLoaded_MaskOut, %entry ],
  //        [ %OldVal_MaskOut, %partword.cmpxchg.failure ]
  //     %FullWord_NewVal = or i32 %Loaded_MaskOut, %NewVal_Shifted
  //     %FullWord_Cmp = or i32 %Loaded_MaskOut, %Cmp_Shifted
  //     %NewCI = cmpxchg i32* %PMV.AlignedAddr, i32 %FullWord_Cmp,
  //        i32 %FullWord_NewVal success_ordering failure_ordering
  //     %OldVal = extractvalue { i32, i1 } %NewCI, 0
  //     %Success = extractvalue { i32, i1 } %NewCI, 1
  //     br i1 %Success, label %partword.cmpxchg.end,
  //        label %partword.cmpxchg.failure
  // partword.cmpxchg.failure:
  //     %OldVal_MaskOut = and i32 %OldVal, %PMV.Inv_Mask
  //     %ShouldContinue = icmp ne i32 %Loaded_MaskOut, %OldVal_MaskOut
  //     br i1 %ShouldContinue, label %partword.cmpxchg.loop,
  //        label %partword.cmpxchg.end
  // partword.cmpxchg.end:
  //     %tmp1 = lshr i32 %OldVal, %PMV.ShiftAmt
  //     %FinalOldVal = trunc i32 %tmp1 to i8
  //     %tmp2 = insertvalue { i8, i1 } poison, i8 %FinalOldVal, 0
  //     %Res = insertvalue { i8, i1 } %tmp2, i1 %Success, 1

  Value *Addr = CI->getPointerOperand();
  Value *Cmp = CI->getCompareOperand();
  Value *NewVal = CI->getNewValOperand();

  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  ReplacementIRBuilder Builder(CI, *DL);
  LLVMContext &Ctx = Builder.getContext();

  BasicBlock *EndBB =
      BB->splitBasicBlock(CI->getIterator(), "partword.cmpxchg.end");
  auto FailureBB =
      BasicBlock::Create(Ctx, "partword.cmpxchg.failure", F, EndBB);
  auto LoopBB = BasicBlock::Create(Ctx, "partword.cmpxchg.loop", F, FailureBB);

  // The split call above "helpfully" added a branch at the end of BB
  // (to the wrong place).
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, CI, Cmp->getType(), Addr, CI->getAlign(),
                       TLI->getMinCmpXchgSizeInBits() / 8);

  // Shift the incoming values over, into the right location in the word.
  Value *NewVal_Shifted =
      Builder.CreateShl(Builder.CreateZExt(NewVal, PMV.WordType), PMV.ShiftAmt);
  Value *Cmp_Shifted =
      Builder.CreateShl(Builder.CreateZExt(Cmp, PMV.WordType), PMV.ShiftAmt);

  // Load the entire current word, and mask out the part we don't care about.
  LoadInst *InitLoaded = Builder.CreateLoad(PMV.WordType, PMV.AlignedAddr);
  InitLoaded->setVolatile(CI->isVolatile());
  Value *InitLoaded_MaskOut = Builder.CreateAnd(InitLoaded, PMV.Inv_Mask);
  Builder.CreateBr(LoopBB);

  // partword.cmpxchg.loop:
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded_MaskOut = Builder.CreatePHI(PMV.WordType, 2);
  Loaded_MaskOut->addIncoming(InitLoaded_MaskOut, BB);

  // Mask/Or the expected and new values into place in the loaded word.
  Value *FullWord_NewVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Shifted);
  Value *FullWord_Cmp = Builder.CreateOr(Loaded_MaskOut, Cmp_Shifted);
  AtomicCmpXchgInst *NewCI = Builder.CreateAtomicCmpXchg(
      PMV.AlignedAddr, FullWord_Cmp, FullWord_NewVal, PMV.AlignedAddrAlignment,
      CI->getSuccessOrdering(), CI->getFailureOrdering(), CI->getSyncScopeID());
  NewCI->setVolatile(CI->isVolatile());
  // A weak incoming cmpxchg needs no retry loop, so the inner cmpxchg may
  // stay weak; a strong one keeps the inner cmpxchg strong so the masked
  // comparison below can distinguish neighbor interference from a genuine
  // comparison failure.
  NewCI->setWeak(CI->isWeak());

  Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
  Value *Success = Builder.CreateExtractValue(NewCI, 1);

  if (CI->isWeak())
    Builder.CreateBr(EndBB);
  else
    Builder.CreateCondBr(Success, EndBB, FailureBB);

  // partword.cmpxchg.failure:
  Builder.SetInsertPoint(FailureBB);
  // Mask out the bits we just tried to write, and compare with the snapshot:
  // if another part of the word changed, retry; otherwise fail for real.
  Value *OldVal_MaskOut = Builder.CreateAnd(OldVal, PMV.Inv_Mask);
  Value *ShouldContinue = Builder.CreateICmpNE(Loaded_MaskOut, OldVal_MaskOut);
  Builder.CreateCondBr(ShouldContinue, LoopBB, EndBB);

  // Add the second value to the phi from above.
  Loaded_MaskOut->addIncoming(OldVal_MaskOut, FailureBB);

  // partword.cmpxchg.end:
  Builder.SetInsertPoint(CI);
  Value *FinalOldVal = extractMaskedValue(Builder, OldVal, PMV);
  Value *Res = PoisonValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, FinalOldVal, 0);
  Res = Builder.CreateInsertValue(Res, Success, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
  return true;
}

void AtomicExpandImpl::expandAtomicOpToLLSC(
    Instruction *I, Type *ResultType, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {
  ReplacementIRBuilder Builder(I, *DL);
  Value *Loaded = insertRMWLLSCLoop(Builder, ResultType, Addr, AddrAlign,
                                    MemOpOrder, PerformOp);

  I->replaceAllUsesWith(Loaded);
  I->eraseFromParent();
}

void AtomicExpandImpl::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
  ReplacementIRBuilder Builder(AI, *DL);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  // The value operand must be sign-extended for signed min/max so that the
  // target's signed comparison instructions can be used. Otherwise, just
  // zero-extend.
  Instruction::CastOps CastOp = Instruction::ZExt;
  AtomicRMWInst::BinOp RMWOp = AI->getOperation();
  if (RMWOp == AtomicRMWInst::Max || RMWOp == AtomicRMWInst::Min)
    CastOp = Instruction::SExt;

  Value *ValOperand_Shifted = Builder.CreateShl(
      Builder.CreateCast(CastOp, AI->getValOperand(), PMV.WordType),
      PMV.ShiftAmt, "ValOperand_Shifted");
  Value *OldResult = TLI->emitMaskedAtomicRMWIntrinsic(
      Builder, AI, PMV.AlignedAddr, ValOperand_Shifted, PMV.Mask, PMV.ShiftAmt,
      AI->getOrdering());
  Value *FinalOldResult = extractMaskedValue(Builder, OldResult, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
}

void AtomicExpandImpl::expandAtomicCmpXchgToMaskedIntrinsic(
    AtomicCmpXchgInst *CI) {
  ReplacementIRBuilder Builder(CI, *DL);

  PartwordMaskValues PMV = createMaskInstrs(
      Builder, CI, CI->getCompareOperand()->getType(), CI->getPointerOperand(),
      CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *CmpVal_Shifted = Builder.CreateShl(
      Builder.CreateZExt(CI->getCompareOperand(), PMV.WordType), PMV.ShiftAmt,
      "CmpVal_Shifted");
  Value *NewVal_Shifted = Builder.CreateShl(
      Builder.CreateZExt(CI->getNewValOperand(), PMV.WordType), PMV.ShiftAmt,
      "NewVal_Shifted");
  Value *OldVal = TLI->emitMaskedAtomicCmpXchgIntrinsic(
      Builder, CI, PMV.AlignedAddr, CmpVal_Shifted, NewVal_Shifted, PMV.Mask,
      CI->getMergedOrdering());
  Value *FinalOldVal = extractMaskedValue(Builder, OldVal, PMV);
  Value *Res = PoisonValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, FinalOldVal, 0);
  Value *Success = Builder.CreateICmpEQ(
      CmpVal_Shifted, Builder.CreateAnd(OldVal, PMV.Mask), "Success");
  Res = Builder.CreateInsertValue(Res, Success, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
}

Value *AtomicExpandImpl::insertRMWLLSCLoop(
    IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {
  LLVMContext &Ctx = Builder.getContext();
  BasicBlock *BB = Builder.GetInsertBlock();
  Function *F = BB->getParent();

  assert(AddrAlign >= F->getDataLayout().getTypeStoreSize(ResultTy) &&
         "Expected at least natural alignment at this point.");

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  // atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp i32 ne %stored, 0
  //     br i1 %try_again, label %loop, label %atomicrmw.end
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB =
      BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place).
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, ResultTy, Addr, MemOpOrder);

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  return Loaded;
}

// Convert an atomic cmpxchg of a non-integral type to an integer cmpxchg of
// the equivalent bitwidth. See the function comment on
// convertAtomicStoreToIntegerType for background.
AtomicCmpXchgInst *
AtomicExpandImpl::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
  auto *M = CI->getModule();
  Type *NewTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(),
                                            M->getDataLayout());

  ReplacementIRBuilder Builder(CI, *DL);

  Value *Addr = CI->getPointerOperand();

  Value *NewCmp = Builder.CreatePtrToInt(CI->getCompareOperand(), NewTy);
  Value *NewNewVal = Builder.CreatePtrToInt(CI->getNewValOperand(), NewTy);

  auto *NewCI = Builder.CreateAtomicCmpXchg(
      Addr, NewCmp, NewNewVal, CI->getAlign(), CI->getSuccessOrdering(),
      CI->getFailureOrdering(), CI->getSyncScopeID());
  NewCI->setVolatile(CI->isVolatile());
  NewCI->setWeak(CI->isWeak());
  LLVM_DEBUG(dbgs() << "Replaced " << *CI << " with " << *NewCI << "\n");

  Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
  Value *Succ = Builder.CreateExtractValue(NewCI, 1);

  OldVal = Builder.CreateIntToPtr(OldVal, CI->getCompareOperand()->getType());

  Value *Res = PoisonValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, OldVal, 0);
  Res = Builder.CreateInsertValue(Res, Succ, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
  return NewCI;
}

bool AtomicExpandImpl::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();

  // If shouldInsertFencesForAtomic() returns true, the target does not want
  // to deal with memory orders and emitLeading/TrailingFence should take care
  // of everything. Otherwise those hooks are no-ops and we must preserve the
  // ordering on the instruction itself.
  bool ShouldInsertFencesForAtomic = TLI->shouldInsertFencesForAtomic(CI);
  AtomicOrdering MemOpOrder = ShouldInsertFencesForAtomic
                                  ? AtomicOrdering::Monotonic
                                  : CI->getMergedOrdering();

  // In implementations which use a barrier to achieve release semantics, we
  // can delay emitting this barrier until we know a store is actually going
  // to be attempted. The cost of this delay is that we need 2 copies of the
  // block emitting the load-linked, affecting code size.
  //
  // Ideally, this logic would be unconditional except for the minsize check
  // since in other cases the extra blocks naturally collapse down to the
  // minimal loop. Unfortunately, this puts too much stress on later
  // optimisations so we avoid emitting the extra logic in those cases too.
  bool HasReleasedLoadBB = !CI->isWeak() && ShouldInsertFencesForAtomic &&
                           SuccessOrder != AtomicOrdering::Monotonic &&
                           SuccessOrder != AtomicOrdering::Acquire &&
                           !F->hasMinSize();

  // There's no overhead for sinking the release barrier in a weak cmpxchg, so
  // we can exploit that fact to avoid the awkward extra block in the minsize
  // case.
  bool UseUnconditionalReleaseBarrier = F->hasMinSize() && !CI->isWeak();

  // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord
  //        fail_ord
  //
  // The full expansion we produce is:
  //     [...]
  // %aligned.addr = ...
  // cmpxchg.start:
  //     %unreleasedload = @load.linked(%aligned.addr)
  //     %unreleasedload.extract = extract value from %unreleasedload
  //     %should_store = icmp eq %unreleasedload.extract, %desired
  //     br i1 %should_store, label %cmpxchg.releasingstore,
  //                          label %cmpxchg.nostore
  // cmpxchg.releasingstore:
  //     fence?
  //     br label cmpxchg.trystore
  // cmpxchg.trystore:
  //     %loaded.trystore = phi [%unreleasedload, %releasingstore],
  //                            [%releasedload, %cmpxchg.releasedload]
  //     %updated.new = insert %new into %loaded.trystore
  //     %stored = @store_conditional(%updated.new, %aligned.addr)
  //     %success = icmp eq i32 %stored, 0
  //     br i1 %success, label %cmpxchg.success,
  //                     label %cmpxchg.releasedload/%cmpxchg.failure
  // cmpxchg.releasedload:
  //     %releasedload = @load.linked(%aligned.addr)
  //     %releasedload.extract = extract value from %releasedload
  //     %should_store = icmp eq %releasedload.extract, %desired
  //     br i1 %should_store, label %cmpxchg.trystore,
  //                          label %cmpxchg.failure
  // cmpxchg.success:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.nostore:
  //     %loaded.nostore = phi [%unreleasedload, %cmpxchg.start],
  //                           [%releasedload,
  //                               %cmpxchg.releasedload/%cmpxchg.trystore]
  //     @load_linked_fail_balance()?
  //     br label %cmpxchg.failure
  // cmpxchg.failure:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.end:
  //     %loaded.exit = phi [%loaded.nostore, %cmpxchg.failure],
  //                        [%loaded.trystore, %cmpxchg.success]
  //     %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
  //     %restmp = insertvalue { iN, i1 } poison, iN %loaded.exit, 0
  //     %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(CI->getIterator(), "cmpxchg.end");
  auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
  auto NoStoreBB = BasicBlock::Create(Ctx, "cmpxchg.nostore", F, FailureBB);
  auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, NoStoreBB);
  auto ReleasedLoadBB =
      BasicBlock::Create(Ctx, "cmpxchg.releasedload", F, SuccessBB);
  auto TryStoreBB =
      BasicBlock::Create(Ctx, "cmpxchg.trystore", F, ReleasedLoadBB);
  auto ReleasingStoreBB =
      BasicBlock::Create(Ctx, "cmpxchg.fencedstore", F, TryStoreBB);
  auto StartBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, ReleasingStoreBB);

  ReplacementIRBuilder Builder(CI, *DL);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), so we tear it down and start again.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  if (ShouldInsertFencesForAtomic && UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, CI, SuccessOrder);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, CI, CI->getCompareOperand()->getType(), Addr,
                       CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);
  Builder.CreateBr(StartBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(StartBB);
  Value *UnreleasedLoad =
      TLI->emitLoadLinked(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder);
  Value *UnreleasedLoadExtract =
      extractMaskedValue(Builder, UnreleasedLoad, PMV);
  Value *ShouldStore = Builder.CreateICmpEQ(
      UnreleasedLoadExtract, CI->getCompareOperand(), "should_store");

  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past that fence instruction (if it exists).
  Builder.CreateCondBr(ShouldStore, ReleasingStoreBB, NoStoreBB,
                       MDBuilder(F->getContext()).createLikelyBranchWeights());

  Builder.SetInsertPoint(ReleasingStoreBB);
  if (ShouldInsertFencesForAtomic && !UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, CI, SuccessOrder);
  Builder.CreateBr(TryStoreBB);

  Builder.SetInsertPoint(TryStoreBB);
  PHINode *LoadedTryStore =
      Builder.CreatePHI(PMV.WordType, 2, "loaded.trystore");
  LoadedTryStore->addIncoming(UnreleasedLoad, ReleasingStoreBB);
  Value *NewValueInsert =
      insertMaskedValue(Builder, LoadedTryStore, CI->getNewValOperand(), PMV);
  Value *StoreSuccess = TLI->emitStoreConditional(Builder, NewValueInsert,
                                                  PMV.AlignedAddr, MemOpOrder);
  StoreSuccess = Builder.CreateICmpEQ(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
  BasicBlock *RetryBB = HasReleasedLoadBB ? ReleasedLoadBB : StartBB;
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : RetryBB,
                       MDBuilder(F->getContext()).createLikelyBranchWeights());

  Builder.SetInsertPoint(ReleasedLoadBB);
  Value *SecondLoad;
  if (HasReleasedLoadBB) {
    SecondLoad =
        TLI->emitLoadLinked(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder);
    Value *SecondLoadExtract = extractMaskedValue(Builder, SecondLoad, PMV);
    ShouldStore = Builder.CreateICmpEQ(SecondLoadExtract,
                                       CI->getCompareOperand(), "should_store");

    // If the cmpxchg doesn't actually need any ordering when it fails, we can
    // jump straight past that fence instruction (if it exists).
    Builder.CreateCondBr(
        ShouldStore, TryStoreBB, NoStoreBB,
        MDBuilder(F->getContext()).createLikelyBranchWeights());

    LoadedTryStore->addIncoming(SecondLoad, ReleasedLoadBB);
  } else
    Builder.CreateUnreachable();

  // Make sure later instructions don't get reordered with a fence if
  // necessary.
  Builder.SetInsertPoint(SuccessBB);
  if (ShouldInsertFencesForAtomic ||
      TLI->shouldInsertTrailingFenceForAtomicStore(CI))
    TLI->emitTrailingFence(Builder, CI, SuccessOrder);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(NoStoreBB);
  PHINode *LoadedNoStore =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.nostore");
  LoadedNoStore->addIncoming(UnreleasedLoad, StartBB);
  if (HasReleasedLoadBB)
    LoadedNoStore->addIncoming(SecondLoad, ReleasedLoadBB);

  // In the failing case, where we don't execute the store-conditional, the
  // target might want to balance out the load-linked with a dedicated
  // instruction (e.g., on ARM, clearing the exclusive monitor).
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);
  Builder.CreateBr(FailureBB);

  Builder.SetInsertPoint(FailureBB);
  PHINode *LoadedFailure =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.failure");
  LoadedFailure->addIncoming(LoadedNoStore, NoStoreBB);
  if (CI->isWeak())
    LoadedFailure->addIncoming(LoadedTryStore, TryStoreBB);
  if (ShouldInsertFencesForAtomic)
    TLI->emitTrailingFence(Builder, CI, FailureOrder);
  Builder.CreateBr(ExitBB);

  // Finally, we have control-flow based knowledge of whether the cmpxchg
  // succeeded or not. We expose this to later passes by converting any
  // subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate
  // PHI.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *LoadedExit =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.exit");
  LoadedExit->addIncoming(LoadedTryStore, SuccessBB);
  LoadedExit->addIncoming(LoadedFailure, FailureBB);
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2, "success");
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);

  // This is the "exit value" from the cmpxchg expansion. It may be of
  // a type wider than the one in the original instruction.
  Value *LoadedFull = LoadedExit;

  Builder.SetInsertPoint(ExitBB, std::next(Success->getIterator()));
  Value *Loaded = extractMaskedValue(Builder, LoadedFull, PMV);

  // Look for any users of the cmpxchg that are just comparing the loaded
  // value against the desired one, and replace them with the CFG-derived
  // values.
  SmallVector<ExtractValueInst *, 2> PrunedInsts;
  for (auto *User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;

    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");

    if (EV->getIndices()[0] == 0)
      EV->replaceAllUsesWith(Loaded);
    else
      EV->replaceAllUsesWith(Success);

    PrunedInsts.push_back(EV);
  }

  // We can remove the instructions now we're no longer iterating through them.
  for (auto *EV : PrunedInsts)
    EV->eraseFromParent();

  if (!CI->use_empty()) {
    // Some use of the full struct return that we don't understand has
    // happened, so we've got to reconstruct it properly.
    Value *Res;
    Res = Builder.CreateInsertValue(PoisonValue::get(CI->getType()), Loaded, 0);
    Res = Builder.CreateInsertValue(Res, Success, 1);

    CI->replaceAllUsesWith(Res);
  }

  CI->eraseFromParent();
  return true;
}

bool AtomicExpandImpl::isIdempotentRMW(AtomicRMWInst *RMWI) {
  // TODO: Add floating-point support.
  auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;

  switch (RMWI->getOperation()) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
    return C->isZero();
  case AtomicRMWInst::And:
    return C->isMinusOne();
  case AtomicRMWInst::Min:
    return C->isMaxValue(true);
  case AtomicRMWInst::Max:
    return C->isMinValue(true);
  case AtomicRMWInst::UMin:
    return C->isMaxValue(false);
  case AtomicRMWInst::UMax:
    return C->isMinValue(false);
  default:
    return false;
  }
}
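
// Illustrative sketch (not from the upstream source): e.g.
//   %old = atomicrmw or ptr %p, i32 0 seq_cst
// never changes memory, so targets may lower it via
// lowerIdempotentRMWIntoFencedLoad() to an appropriately fenced atomic load
// that yields the same %old value.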

bool AtomicExpandImpl::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    tryExpandAtomicLoad(ResultingLoad);
    return true;
  }
  return false;
}

Value *AtomicExpandImpl::insertRMWCmpXchgLoop(
    IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder, SyncScope::ID SSID,
    function_ref<Value *(IRBuilderBase &, Value *)> PerformOp,
    CreateCmpXchgInstFun CreateCmpXchg, Instruction *MetadataSrc) {
  LLVMContext &Ctx = Builder.getContext();
  BasicBlock *BB = Builder.GetInsertBlock();
  Function *F = BB->getParent();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     %init_loaded = load atomic iN* %addr
  //     br label %loop
  // loop:
  //     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
  //     %new = some_op iN %loaded, %incr
  //     %pair = cmpxchg iN* %addr, iN %loaded, iN %new
  //     %new_loaded = extractvalue { iN, i1 } %pair, 0
  //     %success = extractvalue { iN, i1 } %pair, 1
  //     br i1 %success, label %atomicrmw.end, label %loop
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB =
      BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place).
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  LoadInst *InitLoaded = Builder.CreateAlignedLoad(ResultTy, Addr, AddrAlign);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded = Builder.CreatePHI(ResultTy, 2, "loaded");
  Loaded->addIncoming(InitLoaded, BB);

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *NewLoaded = nullptr;
  Value *Success = nullptr;

  CreateCmpXchg(Builder, Addr, Loaded, NewVal, AddrAlign,
                MemOpOrder == AtomicOrdering::Unordered
                    ? AtomicOrdering::Monotonic
                    : MemOpOrder,
                SSID, Success, NewLoaded, MetadataSrc);
  assert(Success && NewLoaded);

  Loaded->addIncoming(NewLoaded, LoopBB);

  Builder.CreateCondBr(Success, ExitBB, LoopBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  return NewLoaded;
}

bool AtomicExpandImpl::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
  unsigned ValueSize = getAtomicOpSize(CI);

  switch (TLI->shouldExpandAtomicCmpXchgInIR(CI)) {
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicCmpXchg");
  case TargetLoweringBase::AtomicExpansionKind::None:
    if (ValueSize < MinCASSize)
      return expandPartwordCmpXchg(CI);
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC: {
    return expandAtomicCmpXchg(CI);
  }
  case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic:
    expandAtomicCmpXchgToMaskedIntrinsic(CI);
    return true;
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    return lowerAtomicCmpXchgInst(CI);
  case TargetLoweringBase::AtomicExpansionKind::CustomExpand: {
    TLI->emitExpandAtomicCmpXchg(CI);
    return true;
  }
  }
}

// Note: This function is exposed externally by AtomicExpandUtils.h
bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                    CreateCmpXchgInstFun CreateCmpXchg) {
  ReplacementIRBuilder Builder(AI, AI->getDataLayout());
  Builder.setIsFPConstrained(
      AI->getFunction()->hasFnAttribute(Attribute::StrictFP));

  // FIXME: If FP exceptions are observable, we should force them off for the
  // loop for the FP atomics.
  Value *Loaded = AtomicExpandImpl::insertRMWCmpXchgLoop(
      Builder, AI->getType(), AI->getPointerOperand(), AI->getAlign(),
      AI->getOrdering(), AI->getSyncScopeID(),
      [&](IRBuilderBase &Builder, Value *Loaded) {
        return buildAtomicRMWValue(AI->getOperation(), Builder, Loaded,
                                   AI->getValOperand());
      },
      CreateCmpXchg, /*MetadataSrc=*/AI);

  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();
  return true;
}

// In order to use one of the sized library calls such as
// __atomic_fetch_add_4, the alignment must be sufficient, the size
// must be one of the potentially-specialized sizes, and the value
// type must actually exist in C on the target (otherwise, the
// function wouldn't actually be defined.)
static bool canUseSizedAtomicCall(unsigned Size, Align Alignment,
                                  const DataLayout &DL) {
  // TODO: "LargestSize" is an approximation for "largest type that
  // you can express in C". It seems to be the case that int128 is
  // supported on all 64-bit platforms, otherwise only up to 64-bit
  // integers are supported. If we get this wrong, then we'll try to
  // call a sized libcall that doesn't actually exist. There should
  // really be some more reliable way in LLVM of determining integer
  // sizes which are valid in the target's C ABI...
  unsigned LargestSize = DL.getLargestLegalIntTypeSizeInBits() >= 64 ? 16 : 8;
  return Alignment >= Size &&
         (Size == 1 || Size == 2 || Size == 4 || Size == 8 || Size == 16) &&
         Size <= LargestSize;
}
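
// Illustrative examples (hypothetical values, not from the upstream source):
//   Size = 4,  Alignment = 4  -> true on typical 64-bit targets, so a sized
//                                call such as __atomic_fetch_add_4 may be used
//   Size = 4,  Alignment = 2  -> false (underaligned; generic call needed)
//   Size = 16, Alignment = 16 -> true only when 64-bit integers are legal,
//                                per the LargestSize heuristic above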

void AtomicExpandImpl::expandAtomicLoadToLibcall(LoadInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_LOAD,   RTLIB::ATOMIC_LOAD_1, RTLIB::ATOMIC_LOAD_2,
      RTLIB::ATOMIC_LOAD_4, RTLIB::ATOMIC_LOAD_8, RTLIB::ATOMIC_LOAD_16};
  unsigned Size = getAtomicOpSize(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), nullptr, nullptr,
      I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
  if (!expanded)
    handleFailure(*I, "unsupported atomic load");
}

void AtomicExpandImpl::expandAtomicStoreToLibcall(StoreInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_STORE,   RTLIB::ATOMIC_STORE_1, RTLIB::ATOMIC_STORE_2,
      RTLIB::ATOMIC_STORE_4, RTLIB::ATOMIC_STORE_8, RTLIB::ATOMIC_STORE_16};
  unsigned Size = getAtomicOpSize(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), I->getValueOperand(),
      nullptr, I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
  if (!expanded)
    handleFailure(*I, "unsupported atomic store");
}

void AtomicExpandImpl::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_COMPARE_EXCHANGE,   RTLIB::ATOMIC_COMPARE_EXCHANGE_1,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_2, RTLIB::ATOMIC_COMPARE_EXCHANGE_4,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_8, RTLIB::ATOMIC_COMPARE_EXCHANGE_16};
  unsigned Size = getAtomicOpSize(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), I->getNewValOperand(),
      I->getCompareOperand(), I->getSuccessOrdering(), I->getFailureOrdering(),
      Libcalls);
  if (!expanded)
    handleFailure(*I, "unsupported cmpxchg");
}
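
// Illustrative sketch (not from the upstream source): for a naturally aligned
//   %r = cmpxchg ptr %p, i32 %exp, i32 %new seq_cst seq_cst
// the sized-libcall path produces a call equivalent to the C runtime routine
//   bool __atomic_compare_exchange_4(void *p, uint32_t *expected,
//                                    uint32_t desired, int success_order,
//                                    int failure_order);
// with %exp spilled to an alloca so the runtime can write back the value it
// observed on failure.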

static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) {
  static const RTLIB::Libcall LibcallsXchg[6] = {
      RTLIB::ATOMIC_EXCHANGE,   RTLIB::ATOMIC_EXCHANGE_1,
      RTLIB::ATOMIC_EXCHANGE_2, RTLIB::ATOMIC_EXCHANGE_4,
      RTLIB::ATOMIC_EXCHANGE_8, RTLIB::ATOMIC_EXCHANGE_16};
  static const RTLIB::Libcall LibcallsAdd[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_ADD_1,
      RTLIB::ATOMIC_FETCH_ADD_2, RTLIB::ATOMIC_FETCH_ADD_4,
      RTLIB::ATOMIC_FETCH_ADD_8, RTLIB::ATOMIC_FETCH_ADD_16};
  static const RTLIB::Libcall LibcallsSub[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_SUB_1,
      RTLIB::ATOMIC_FETCH_SUB_2, RTLIB::ATOMIC_FETCH_SUB_4,
      RTLIB::ATOMIC_FETCH_SUB_8, RTLIB::ATOMIC_FETCH_SUB_16};
  static const RTLIB::Libcall LibcallsAnd[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_AND_1,
      RTLIB::ATOMIC_FETCH_AND_2, RTLIB::ATOMIC_FETCH_AND_4,
      RTLIB::ATOMIC_FETCH_AND_8, RTLIB::ATOMIC_FETCH_AND_16};
  static const RTLIB::Libcall LibcallsOr[6] = {
      RTLIB::UNKNOWN_LIBCALL,   RTLIB::ATOMIC_FETCH_OR_1,
      RTLIB::ATOMIC_FETCH_OR_2, RTLIB::ATOMIC_FETCH_OR_4,
      RTLIB::ATOMIC_FETCH_OR_8, RTLIB::ATOMIC_FETCH_OR_16};
  static const RTLIB::Libcall LibcallsXor[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_XOR_1,
      RTLIB::ATOMIC_FETCH_XOR_2, RTLIB::ATOMIC_FETCH_XOR_4,
      RTLIB::ATOMIC_FETCH_XOR_8, RTLIB::ATOMIC_FETCH_XOR_16};
  static const RTLIB::Libcall LibcallsNand[6] = {
      RTLIB::UNKNOWN_LIBCALL,     RTLIB::ATOMIC_FETCH_NAND_1,
      RTLIB::ATOMIC_FETCH_NAND_2, RTLIB::ATOMIC_FETCH_NAND_4,
      RTLIB::ATOMIC_FETCH_NAND_8, RTLIB::ATOMIC_FETCH_NAND_16};

  switch (Op) {
  case AtomicRMWInst::BAD_BINOP:
    llvm_unreachable("Should not have BAD_BINOP.");
  case AtomicRMWInst::Xchg:
    return ArrayRef(LibcallsXchg);
  case AtomicRMWInst::Add:
    return ArrayRef(LibcallsAdd);
  case AtomicRMWInst::Sub:
    return ArrayRef(LibcallsSub);
  case AtomicRMWInst::And:
    return ArrayRef(LibcallsAnd);
  case AtomicRMWInst::Or:
    return ArrayRef(LibcallsOr);
  case AtomicRMWInst::Xor:
    return ArrayRef(LibcallsXor);
  case AtomicRMWInst::Nand:
    return ArrayRef(LibcallsNand);
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
  case AtomicRMWInst::FAdd:
  case AtomicRMWInst::FSub:
  case AtomicRMWInst::FMax:
  case AtomicRMWInst::FMin:
  case AtomicRMWInst::FMaximum:
  case AtomicRMWInst::FMinimum:
  case AtomicRMWInst::UIncWrap:
  case AtomicRMWInst::UDecWrap:
  case AtomicRMWInst::USubCond:
  case AtomicRMWInst::USubSat:
    // No atomic libcalls are available for these operations.
    return {};
  }
  llvm_unreachable("Unexpected AtomicRMW operation.");
}

void AtomicExpandImpl::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
  ArrayRef<RTLIB::Libcall> Libcalls = GetRMWLibcall(I->getOperation());

  unsigned Size = getAtomicOpSize(I);

  bool Success = false;
  if (!Libcalls.empty())
    Success = expandAtomicOpToLibcall(
        I, Size, I->getAlign(), I->getPointerOperand(), I->getValOperand(),
        nullptr, I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);

  // The expansion failed: either there were no libcalls at all for
  // the operation (min/max), or there were only size-specialized
  // libcalls (add/sub/etc) and we needed a generic. So, expand to a
  // CAS libcall, via a CAS loop, instead.
  if (!Success) {
    expandAtomicRMWToCmpXchg(
        I, [this](IRBuilderBase &Builder, Value *Addr, Value *Loaded,
                  Value *NewVal, Align Alignment, AtomicOrdering MemOpOrder,
                  SyncScope::ID SSID, Value *&Success, Value *&NewLoaded,
                  Instruction *MetadataSrc) {
          // Create the CAS instruction normally...
          AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
              Addr, Loaded, NewVal, Alignment, MemOpOrder,
              AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder), SSID);
          if (MetadataSrc)
            copyMetadataForAtomic(*Pair, *MetadataSrc);

          Success = Builder.CreateExtractValue(Pair, 1, "success");
          NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");

          // ...and then expand the CAS into a libcall.
          expandAtomicCASToLibcall(Pair);
        });
  }
}

// A helper routine for the above expandAtomic*ToLibcall functions.
//
// 'Libcalls' contains an array of enum values for the particular
// ATOMIC libcalls to be emitted. All of the other arguments besides
// 'I' are extracted from the Instruction subclass by the
// caller, and passed in to this function.
bool AtomicExpandImpl::expandAtomicOpToLibcall(
    Instruction *I, unsigned Size, Align Alignment, Value *PointerOperand,
    Value *ValueOperand, Value *CASExpected, AtomicOrdering Ordering,
    AtomicOrdering Ordering2, ArrayRef<RTLIB::Libcall> Libcalls) {
  assert(Libcalls.size() == 6);

  LLVMContext &Ctx = I->getContext();
  Module *M = I->getModule();
  const DataLayout &DL = M->getDataLayout();
  IRBuilder<> Builder(I);
  IRBuilder<> AllocaBuilder(&I->getFunction()->getEntryBlock().front());

  bool UseSizedLibcall = canUseSizedAtomicCall(Size, Alignment, DL);
  Type *SizedIntTy = Type::getIntNTy(Ctx, Size * 8);

  const Align AllocaAlignment = DL.getPrefTypeAlign(SizedIntTy);

  // TODO: the "order" argument type is "int", not int32. So
  // getInt32Ty may be wrong if the arch uses e.g. 16-bit ints.
  assert(Ordering != AtomicOrdering::NotAtomic && "expect atomic MO");
  Constant *OrderingVal =
      ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering));
  Constant *Ordering2Val = nullptr;
  if (CASExpected) {
    assert(Ordering2 != AtomicOrdering::NotAtomic && "expect atomic MO");
    Ordering2Val =
        ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering2));
  }
  bool HasResult = I->getType() != Type::getVoidTy(Ctx);

  RTLIB::Libcall RTLibType;
  if (UseSizedLibcall) {
    switch (Size) {
    case 1:
      RTLibType = Libcalls[1];
      break;
    case 2:
      RTLibType = Libcalls[2];
      break;
    case 4:
      RTLibType = Libcalls[3];
      break;
    case 8:
      RTLibType = Libcalls[4];
      break;
    case 16:
      RTLibType = Libcalls[5];
      break;
    }
  } else if (Libcalls[0] != RTLIB::UNKNOWN_LIBCALL) {
    RTLibType = Libcalls[0];
  } else {
    // Can't use sized function, and there's no generic for this
    // operation, so give up.
    return false;
  }

  if (!TLI->getLibcallName(RTLibType)) {
    // This target does not implement the requested atomic libcall.
    return false;
  }

  // Build up the function call. There's two kinds. First, the sized
  // variants. These calls are going to be one of the following (with
  // N=1,2,4,8,16):
  //  iN    __atomic_load_N(iN *ptr, int ordering)
  //  void  __atomic_store_N(iN *ptr, iN val, int ordering)
  //  iN    __atomic_{exchange|fetch_*}_N(iN *ptr, iN val, int ordering)
  //  bool  __atomic_compare_exchange_N(iN *ptr, iN *expected, iN desired,
  //                                    int success_order, int failure_order)
  //
  // Note that these functions can be used for non-integer atomic
  // operations, the values just need to be bitcast to integers on the
  // way in and out.
  //
  // And, then, the generic variants. They look like the following:
  //  void  __atomic_load(size_t size, void *ptr, void *ret, int ordering)
  //  void  __atomic_store(size_t size, void *ptr, void *val, int ordering)
  //  void  __atomic_exchange(size_t size, void *ptr, void *val, void *ret,
  //                          int ordering)
  //  bool  __atomic_compare_exchange(size_t size, void *ptr, void *expected,
  //                                  void *desired, int success_order,
  //                                  int failure_order)
  //
  // The different signatures are built up depending on the
  // 'UseSizedLibcall', 'CASExpected', 'ValueOperand', and 'HasResult'
  // variables.

  AllocaInst *AllocaCASExpected = nullptr;
  AllocaInst *AllocaValue = nullptr;
  AllocaInst *AllocaResult = nullptr;

  Type *ResultTy;
  SmallVector<Value *, 6> Args;
  AttributeList Attr;

  // 'size' argument.
  if (!UseSizedLibcall) {
    // Note, getIntPtrType is assumed equivalent to size_t.
    Args.push_back(ConstantInt::get(DL.getIntPtrType(Ctx), Size));
  }

  // 'ptr' argument.
  // note: This assumes all address spaces share a common libfunc
  // implementation and that addresses are convertable. For systems without
  // that property, we'd need to extend this mechanism to support AS-specific
  // families of atomic intrinsics.
  Value *PtrVal = PointerOperand;
  PtrVal = Builder.CreateAddrSpaceCast(PtrVal, PointerType::getUnqual(Ctx));
  Args.push_back(PtrVal);

  // 'expected' argument, if present.
  if (CASExpected) {
    AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->getType());
    AllocaCASExpected->setAlignment(AllocaAlignment);
    Builder.CreateLifetimeStart(AllocaCASExpected);
    Builder.CreateAlignedStore(CASExpected, AllocaCASExpected, AllocaAlignment);
    Args.push_back(AllocaCASExpected);
  }

  // 'val' argument ('desired' for cas), if present.
  if (ValueOperand) {
    if (UseSizedLibcall) {
      Value *IntValue =
          Builder.CreateBitOrPointerCast(ValueOperand, SizedIntTy);
      Args.push_back(IntValue);
    } else {
      AllocaValue = AllocaBuilder.CreateAlloca(ValueOperand->getType());
      AllocaValue->setAlignment(AllocaAlignment);
      Builder.CreateLifetimeStart(AllocaValue);
      Builder.CreateAlignedStore(ValueOperand, AllocaValue, AllocaAlignment);
      Args.push_back(AllocaValue);
    }
  }

  // 'ret' argument.
  if (!CASExpected && HasResult && !UseSizedLibcall) {
    AllocaResult = AllocaBuilder.CreateAlloca(I->getType());
    AllocaResult->setAlignment(AllocaAlignment);
    Builder.CreateLifetimeStart(AllocaResult);
    Args.push_back(AllocaResult);
  }

  // 'ordering' ('success_order' for cas) argument.
  Args.push_back(OrderingVal);

  // 'failure_order' argument, if present.
  if (Ordering2Val)
    Args.push_back(Ordering2Val);

  // Now, the return type.
  if (CASExpected) {
    ResultTy = Type::getInt1Ty(Ctx);
    Attr = Attr.addRetAttribute(Ctx, Attribute::ZExt);
  } else if (HasResult && UseSizedLibcall)
    ResultTy = SizedIntTy;
  else
    ResultTy = Type::getVoidTy(Ctx);

  // Done with setting up arguments and return types, create the call:
  SmallVector<Type *, 6> ArgTys;
  for (Value *Arg : Args)
    ArgTys.push_back(Arg->getType());
  FunctionType *FnType = FunctionType::get(ResultTy, ArgTys, false);
  FunctionCallee LibcallFn =
      M->getOrInsertFunction(TLI->getLibcallName(RTLibType), FnType, Attr);
  CallInst *Call = Builder.CreateCall(LibcallFn, Args);
  Call->setAttributes(Attr);
  Value *Result = Call;

  // And then, extract the results...
  if (ValueOperand && !UseSizedLibcall)
    Builder.CreateLifetimeEnd(AllocaValue);

  if (CASExpected) {
    // The final result from the CAS is {load of 'expected' alloca, bool
    // result from call}.
    Type *FinalResultTy = I->getType();
    Value *V = PoisonValue::get(FinalResultTy);
    Value *ExpectedOut = Builder.CreateAlignedLoad(
        CASExpected->getType(), AllocaCASExpected, AllocaAlignment);
    Builder.CreateLifetimeEnd(AllocaCASExpected);
    V = Builder.CreateInsertValue(V, ExpectedOut, 0);
    V = Builder.CreateInsertValue(V, Result, 1);
    I->replaceAllUsesWith(V);
  } else if (HasResult) {
    Value *V;
    if (UseSizedLibcall)
      V = Builder.CreateBitOrPointerCast(Result, I->getType());
    else {
      V = Builder.CreateAlignedLoad(I->getType(), AllocaResult,
                                    AllocaAlignment);
      Builder.CreateLifetimeEnd(AllocaResult);
    }
    I->replaceAllUsesWith(V);
  }
  I->eraseFromParent();
  return true;
}
Return the number of bits overwritten by a store of the specified value type.