//===--- CGCall.cpp - Encapsulate calling convention details -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Transforms/Utils/Local.h"
#include <optional>
using namespace clang;
using namespace CodeGen;

/// ClangCallConvToLLVMCallConv - Convert the specified Clang calling
/// convention to the corresponding LLVM IR calling convention.
unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_AArch64SVEPCS: return llvm::CallingConv::AArch64_SVE_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  case CC_SwiftAsync: return llvm::CallingConv::SwiftTail;
  case CC_M68kRTD: return llvm::CallingConv::M68k_RTD;
  case CC_PreserveNone: return llvm::CallingConv::PreserveNone;
  }
}
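// Roughly, this mapping is what turns a source-level convention into the IR
// convention keyword. For example (illustrative, 32-bit x86):
//
//   void f(int) __attribute__((fastcall));   // CC_X86FastCall in the AST
//
// is declared in IR as something like
//
//   declare x86_fastcallcc void @f(i32 inreg %0)
//
// with the exact parameter attributes left to the target ABI code.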
82
83
84
85
86
90 if (RD)
92 else
93 RecTy = Context.VoidTy;
94
95 if (MD)
98}
99
100
104}
105
106
107
108
109
112}
113
114
115
118
119
123}
124
128 unsigned prefixArgs,
129 unsigned totalArgs) {
131 assert(paramInfos.size() <= prefixArgs);
132 assert(proto->getNumParams() + prefixArgs <= totalArgs);
133
134 paramInfos.reserve(totalArgs);
135
136
137 paramInfos.resize(prefixArgs);
138
139
141 paramInfos.push_back(ParamInfo);
142
143 if (ParamInfo.hasPassObjectSize())
144 paramInfos.emplace_back();
145 }
146
147 assert(paramInfos.size() <= totalArgs &&
148 "Did we forget to insert pass_object_size args?");
149
150 paramInfos.resize(totalArgs);
151}
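// For context: pass_object_size is why one source-level parameter can occupy
// two ExtParameterInfo/IR slots, which is what the resizing above accounts
// for. Illustrative declaration:
//
//   void fill(char *buf __attribute__((pass_object_size(0))), char c);
//
// Each call passes the pointer plus an implicit size argument (roughly
// __builtin_object_size(buf, 0)), so an extra empty slot is appended right
// after such a parameter.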
152
153
154
159
160 if (!FPT->hasExtParameterInfos()) {
161 assert(paramInfos.empty() &&
162 "We have paramInfos, but the prototype doesn't?");
163 prefix.append(FPT->param_type_begin(), FPT->param_type_end());
164 return;
165 }
166
167 unsigned PrefixSize = prefix.size();
168
169
170
171 prefix.reserve(prefix.size() + FPT->getNumParams());
172
173 auto ExtInfos = FPT->getExtParameterInfos();
174 assert(ExtInfos.size() == FPT->getNumParams());
175 for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
176 prefix.push_back(FPT->getParamType(I));
177 if (ExtInfos[I].hasPassObjectSize())
179 }
180
182 prefix.size());
183}
184
185
186
193
196
200 FTP->getExtInfo(), paramInfos, Required);
201}
202
203
204
208 return ::arrangeLLVMFunctionInfo(*this, false, argTypes,
209 FTP);
210}
211
213 bool IsWindows) {
214
217
220
223
226
229
232
233 if (PcsAttr *PCS = D->getAttr())
235
238
241
244
247
250
253
256
259
262
265
268
270}
271
272
273
274
275
276
277
283
284
286
287 return ::arrangeLLVMFunctionInfo(
288 *this, true, argTypes,
290}
291
292
295 if (FD->hasAttr()) {
299 }
300}
301
302
303
304
305
308 assert(!isa(MD) && "wrong method for constructors!");
309 assert(!isa(MD) && "wrong method for destructors!");
310
314
316
320 }
321
323}
324
327
328
331 .getCXXABI().hasConstructorVariants();
332}
333
336 auto *MD = cast(GD.getDecl());
337
340
343
344 bool PassParams = true;
345
346 if (auto *CD = dyn_cast(MD)) {
347
348
349 if (auto Inherited = CD->getInheritedConstructor())
351 }
352
354
355
356 if (PassParams)
358
361 if (!paramInfos.empty()) {
362
363 if (AddedArgs.Prefix)
364 paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
366 if (AddedArgs.Suffix)
367 paramInfos.append(AddedArgs.Suffix,
369 }
370
372 (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
374
381 argTypes, extInfo, paramInfos, required);
382}
383
387 for (auto &arg : args)
389 return argTypes;
390}
391
395 for (auto &arg : args)
397 return argTypes;
398}
399
402 unsigned prefixArgs, unsigned totalArgs) {
406 }
407 return result;
408}
409
410
411
412
413
414
415
416
417
422 unsigned ExtraPrefixArgs,
423 unsigned ExtraSuffixArgs,
424 bool PassProtoArgs) {
425
427 for (const auto &Arg : args)
429
430
431 unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
432
436 FPT, TotalPrefixArgs + ExtraSuffixArgs)
438
444
447
448
449 if (PassProtoArgs && FPT->hasExtParameterInfos()) {
450
452 ArgTypes.size());
453 }
454
456 ArgTypes, Info, ParamInfos, Required);
457}
458
459
460
463 if (const CXXMethodDecl *MD = dyn_cast(FD))
464 if (MD->isImplicitObjectMemberFunction())
466
468
469 assert(isa(FTy));
471
472
473
476 {}, noProto->getExtInfo(), {},
478 }
479
481}
482
483
484
487
488
490}
491
492
493
494
495
496
497
507
508 for (const auto *I : MD->parameters()) {
511 I->hasAttr());
512 extParamInfos.push_back(extParamInfo);
513 }
514
518
519 if (getContext().getLangOpts().ObjCAutoRefCount &&
520 MD->hasAttr())
522
525
528 required);
529}
530
536
539}
540
543
545
546 if (isa(GD.getDecl()) ||
547 isa(GD.getDecl()))
549
551}
552
553
554
555
556
557
560 assert(MD->isVirtual() && "only methods have thunks");
565}
566
571
577 ArgTys.push_back(*FTP->param_type_begin());
579 ArgTys.push_back(Context.IntTy);
581 false, true);
585}
586
587
588
594 unsigned numExtraRequiredArgs,
595 bool chainCall) {
596 assert(args.size() >= numExtraRequiredArgs);
597
599
600
602
603
604
605 if (const FunctionProtoType *proto = dyn_cast(fnType)) {
606 if (proto->isVariadic())
608
609 if (proto->hasExtParameterInfos())
611 args.size());
612
613
614
615
616
619 cast(fnType))) {
621 }
622
623
625 for (const auto &arg : args)
629 opts, argTypes, fnType->getExtInfo(),
630 paramInfos, required);
631}
632
633
634
635
636
640 bool chainCall) {
642 chainCall ? 1 : 0, chainCall);
643}
644
645
646
651 false);
652}
653
659
664}
665
669
671 for (const auto &Arg : args)
676}
677
682
686}
687
694}
695
696
697
698
699
704 unsigned numPrefixArgs) {
705 assert(numPrefixArgs + 1 <= args.size() &&
706 "Emitting a call with less args than the required prefix?");
707
708
709 auto paramInfos =
711
712
714
718 paramInfos, required);
719}
720
725}
726
730 assert(signature.arg_size() <= args.size());
731 if (signature.arg_size() == args.size())
732 return signature;
733
736 if (!sigParamInfos.empty()) {
737 paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
738 paramInfos.resize(args.size());
739 }
740
742
754}
755
namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
}
}
761
762
763
764
770 assert(llvm::all_of(argTypes,
771 [](CanQualType T) { return T.isCanonicalAsParam(); }));
772
773
774 llvm::FoldingSetNodeID ID;
777 bool isChainCall =
779 bool isDelegateCall =
782 info, paramInfos, required, resultType, argTypes);
783
784 void *insertPos = nullptr;
785 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
786 if (FI)
787 return *FI;
788
790
791
793 info, paramInfos, resultType, argTypes, required);
794 FunctionInfos.InsertNode(FI, insertPos);
795
796 bool inserted = FunctionsBeingProcessed.insert(FI).second;
797 (void)inserted;
798 assert(inserted && "Recursively being processed?");
799
800
801 if (CC == llvm::CallingConv::SPIR_KERNEL) {
802
803
807 } else {
809 }
810
811
812
813
817
819 if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
820 I.info.setCoerceToType(ConvertType(I.type));
821
822 bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
823 assert(erased && "Not in set?");
824
825 return *FI;
826}
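// Note on the caching above: CGFunctionInfo nodes are interned in a
// FoldingSet keyed on the calling convention, flags, return type, argument
// types, ext-parameter infos and required-argument count, so arranging the
// same signature twice returns the already-computed ABI information instead
// of re-running the ABI classification.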
827
829 bool chainCall, bool delegateCall,
835 assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
838
839 void *buffer =
840 operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
841 argTypes.size() + 1, paramInfos.size()));
842
844 FI->CallingConvention = llvmCC;
845 FI->EffectiveCallingConvention = llvmCC;
846 FI->ASTCallingConvention = info.getCC();
847 FI->InstanceMethod = instanceMethod;
848 FI->ChainCall = chainCall;
849 FI->DelegateCall = delegateCall;
855 FI->Required = required;
858 FI->ArgStruct = nullptr;
859 FI->ArgStructAlign = 0;
860 FI->NumArgs = argTypes.size();
861 FI->HasExtParameterInfos = !paramInfos.empty();
862 FI->getArgsBuffer()[0].type = resultType;
863 FI->MaxVectorWidth = 0;
864 for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
865 FI->getArgsBuffer()[i + 1].type = argTypes[i];
866 for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
867 FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
868 return FI;
869}
870
871
872
namespace {

/// Helper classes describing how an aggregate type is broken apart when it
/// is passed or returned with the "Expand" ABI kinds.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace
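// Quick illustration of the expansion scheme modeled above: an argument of
// type
//
//   struct Point { int x; int y; };
//
// passed with ABIArgInfo::Expand is described by a RecordExpansion listing
// the two fields and ends up as two separate IR arguments (i32, i32); a
// _Complex double likewise expands to two doubles, and a union contributes
// only its largest field.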
937
static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return std::make_unique<ConstantArrayExpansion>(AT->getElementType(),
                                                    AT->getZExtSize());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerative cases - all the fields are
      // the same after flattening, so keep only the largest field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField())
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));
      }

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField())
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return std::make_unique<RecordExpansion>(std::move(Bases),
                                             std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return std::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return std::make_unique<NoExpansion>();
}
992
995 if (auto CAExp = dyn_cast(Exp.get())) {
996 return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
997 }
998 if (auto RExp = dyn_cast(Exp.get())) {
999 int Res = 0;
1000 for (auto BS : RExp->Bases)
1002 for (auto FD : RExp->Fields)
1004 return Res;
1005 }
1006 if (isa(Exp.get()))
1007 return 2;
1008 assert(isa(Exp.get()));
1009 return 1;
1010}
1011
1012void
1016 if (auto CAExp = dyn_cast(Exp.get())) {
1017 for (int i = 0, n = CAExp->NumElts; i < n; i++) {
1019 }
1020 } else if (auto RExp = dyn_cast(Exp.get())) {
1021 for (auto BS : RExp->Bases)
1023 for (auto FD : RExp->Fields)
1025 } else if (auto CExp = dyn_cast(Exp.get())) {
1026 llvm::Type *EltTy = ConvertType(CExp->EltTy);
1027 *TI++ = EltTy;
1028 *TI++ = EltTy;
1029 } else {
1030 assert(isa(Exp.get()));
1032 }
1033}
1034
1036 ConstantArrayExpansion *CAE,
1038 llvm::function_ref<void(Address)> Fn) {
1039 for (int i = 0, n = CAE->NumElts; i < n; i++) {
1041 Fn(EltAddr);
1042 }
1043}
1044
1045void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
1046 llvm::Function::arg_iterator &AI) {
1048 "Unexpected non-simple lvalue during struct expansion.");
1049
1051 if (auto CAExp = dyn_cast(Exp.get())) {
1054 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1055 ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1056 });
1057 } else if (auto RExp = dyn_cast(Exp.get())) {
1060
1065
1066
1067 ExpandTypeFromArgs(BS->getType(), SubLV, AI);
1068 }
1069 for (auto FD : RExp->Fields) {
1070
1072 ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1073 }
1074 } else if (isa(Exp.get())) {
1075 auto realValue = &*AI++;
1076 auto imagValue = &*AI++;
1078 } else {
1079
1080
1081 assert(isa(Exp.get()));
1082 llvm::Value *Arg = &*AI++;
1085 } else {
1086
1087
1088
1089 if (Arg->getType()->isPointerTy()) {
1092 }
1094 }
1095 }
1096}
1097
1098void CodeGenFunction::ExpandTypeToArgs(
1102 if (auto CAExp = dyn_cast(Exp.get())) {
1106 *this, CAExp, Addr, [&](Address EltAddr) {
1109 CAExp->EltTy);
1110 ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1111 IRCallArgPos);
1112 });
1113 } else if (auto RExp = dyn_cast(Exp.get())) {
1117
1122
1123
1124 ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
1125 IRCallArgPos);
1126 }
1127
1129 for (auto FD : RExp->Fields) {
1132 ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1133 IRCallArgPos);
1134 }
1135 } else if (isa(Exp.get())) {
1137 IRCallArgs[IRCallArgPos++] = CV.first;
1138 IRCallArgs[IRCallArgPos++] = CV.second;
1139 } else {
1140 assert(isa(Exp.get()));
1142 assert(RV.isScalar() &&
1143 "Unexpected non-scalar rvalue during struct expansion.");
1144
1145
1146 llvm::Value *V = RV.getScalarVal();
1147 if (IRCallArgPos < IRFuncTy->getNumParams() &&
1148 V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1149 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1150
1151 IRCallArgs[IRCallArgPos++] = V;
1152 }
1153}
1154
1155
1157 llvm::Type *Ty,
1159 const Twine &Name = "tmp") {
1160
1161 auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(Ty);
1163
1165}
1166
1167
1168
1169
1170
1173 llvm::StructType *SrcSTy,
1175
1176 if (SrcSTy->getNumElements() == 0) return SrcPtr;
1177
1179
1180
1181
1182
1183
1184 uint64_t FirstEltSize =
1186 if (FirstEltSize < DstSize &&
1187 FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
1188 return SrcPtr;
1189
1190
1192
1193
1195 if (llvm::StructType *SrcSTy = dyn_castllvm::StructType(SrcTy))
1197
1198 return SrcPtr;
1199}
1200
/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where
/// both the source and destination of the coercion are either integers or
/// pointers. This behaves as if the value were coerced through memory, so on
/// big-endian targets the high bits are preserved in a truncation, while
/// little-endian targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
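// The big-endian shifts above keep the "interesting" bytes in place when a
// value is coerced through an integer of a different width, as if it had
// gone through memory. For example, narrowing an i64 that holds a 4-byte
// value to i32 on a big-endian target must take the high 32 bits (lshr by
// 32, then trunc), because that is where the first four bytes live in
// memory; on a little-endian target a plain trunc/zext suffices.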
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1265
1266
1267 if (SrcTy == Ty)
1269
1270 llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1271
1272 if (llvm::StructType *SrcSTy = dyn_castllvm::StructType(SrcTy)) {
1274 DstSize.getFixedValue(), CGF);
1276 }
1277
1278 llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1279
1280
1281
1282 if ((isallvm::IntegerType(Ty) || isallvm::PointerType(Ty)) &&
1283 (isallvm::IntegerType(SrcTy) || isallvm::PointerType(SrcTy))) {
1286 }
1287
1288
1289 if (!SrcSize.isScalable() && !DstSize.isScalable() &&
1290 SrcSize.getFixedValue() >= DstSize.getFixedValue()) {
1291
1292
1293
1294
1295
1296
1299 }
1300
1301
1302
1303
1304 if (auto *ScalableDstTy = dyn_castllvm::ScalableVectorType(Ty)) {
1305 if (auto *FixedSrcTy = dyn_castllvm::FixedVectorType(SrcTy)) {
1306
1307
1308 if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
1309 ScalableDstTy->getElementCount().isKnownMultipleOf(8) &&
1310 FixedSrcTy->getElementType()->isIntegerTy(8)) {
1311 ScalableDstTy = llvm::ScalableVectorType::get(
1312 FixedSrcTy->getElementType(),
1313 ScalableDstTy->getElementCount().getKnownMinValue() / 8);
1314 }
1315 if (ScalableDstTy->getElementType() == FixedSrcTy->getElementType()) {
1317 auto *PoisonVec = llvm::PoisonValue::get(ScalableDstTy);
1318 auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
1319 llvm::Value *Result = CGF.Builder.CreateInsertVector(
1320 ScalableDstTy, PoisonVec, Load, Zero, "cast.scalable");
1321 if (ScalableDstTy != Ty)
1324 }
1325 }
1326 }
1327
1328
1334 llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinValue()));
1336}
1337
1339 llvm::TypeSize DstSize,
1340 bool DstIsVolatile) {
1341 if (!DstSize)
1342 return;
1343
1344 llvm::Type *SrcTy = Src->getType();
1345 llvm::TypeSize SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
1346
1347
1348
1349
1351 if (llvm::StructType *DstSTy =
1353 assert(!SrcSize.isScalable());
1355 SrcSize.getFixedValue(), *this);
1356 }
1357 }
1358
1359 if (SrcSize.isScalable() || SrcSize <= DstSize) {
1360 if (SrcTy->isIntegerTy() && Dst.getElementType()->isPointerTy() &&
1362
1365 } else if (llvm::StructType *STy =
1366 dyn_castllvm::StructType(Src->getType())) {
1367
1369 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1371 llvm::Value *Elt = Builder.CreateExtractValue(Src, i);
1373 }
1374 } else {
1376 }
1377 } else if (SrcTy->isIntegerTy()) {
1378
1379 llvm::Type *DstIntTy = Builder.getIntNTy(DstSize.getFixedValue() * 8);
1382 } else {
1383
1384
1385
1386
1387
1388
1389
1390
1391
1399 }
1400}
1401
1409 }
1410 return addr;
1411}
1412
static std::pair<llvm::Value *, bool>
CoerceScalableToFixed(CodeGenFunction &CGF, llvm::FixedVectorType *ToTy,
                      llvm::ScalableVectorType *FromTy, llvm::Value *V,
                      StringRef Name = "") {
  // If we are casting a scalable i1 predicate vector to a fixed i8 vector,
  // bitcast the source so a plain vector extract can be used.
  if (FromTy->getElementType()->isIntegerTy(1) &&
      FromTy->getElementCount().isKnownMultipleOf(8) &&
      ToTy->getElementType() == CGF.Builder.getInt8Ty()) {
    FromTy = llvm::ScalableVectorType::get(
        ToTy->getElementType(),
        FromTy->getElementCount().getKnownMinValue() / 8);
    V = CGF.Builder.CreateBitCast(V, FromTy);
  }
  if (FromTy->getElementType() == ToTy->getElementType()) {
    llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);

    V->setName(Name + ".coerce");
    V = CGF.Builder.CreateExtractVector(ToTy, V, Zero, "cast.fixed");
    return {V, true};
  }
  return {V, false};
}
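// The i1 special case above exists because scalable predicate vectors such
// as <vscale x 16 x i1> have no matching fixed-length type; reinterpreting
// the predicate as <vscale x 2 x i8> first (the divide-by-8 packing in the
// code) lets a plain vector extract produce the fixed <N x i8> form that the
// ABI expects.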
1436
namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of LLVM IR function corresponding to single Clang argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns index of first IR argument corresponding to ArgNo, and their
  /// quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};
1505
1506void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1508 bool OnlyRequiredArgs) {
1509 unsigned IRArgNo = 0;
1510 bool SwapThisWithSRet = false;
1512
1515 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1516 }
1517
1518 unsigned ArgNo = 0;
1521 ++I, ++ArgNo) {
1522 assert(I != FI.arg_end());
1525
1526 auto &IRArgs = ArgInfo[ArgNo];
1527
1529 IRArgs.PaddingArgIndex = IRArgNo++;
1530
1534
1535 llvm::StructType *STy = dyn_castllvm::StructType(AI.getCoerceToType());
1537 IRArgs.NumberOfArgs = STy->getNumElements();
1538 } else {
1539 IRArgs.NumberOfArgs = 1;
1540 }
1541 break;
1542 }
1545 IRArgs.NumberOfArgs = 1;
1546 break;
1549
1550 IRArgs.NumberOfArgs = 0;
1551 break;
1554 break;
1557 break;
1558 }
1559
1560 if (IRArgs.NumberOfArgs > 0) {
1561 IRArgs.FirstArgIndex = IRArgNo;
1562 IRArgNo += IRArgs.NumberOfArgs;
1563 }
1564
1565
1566
1567 if (IRArgNo == 1 && SwapThisWithSRet)
1568 IRArgNo++;
1569 }
1570 assert(ArgNo == ArgInfo.size());
1571
1573 InallocaArgNo = IRArgNo++;
1574
1575 TotalIRArgs = IRArgNo;
1576}
1577}
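// Sketch of what this mapping typically produces (illustrative; the exact
// order is ABI-dependent): for a method returning a large struct indirectly,
// the IR arguments are the 'this' pointer and the sret pointer (possibly
// swapped, per SwapThisWithSRet), followed by one or more IR arguments per
// Clang parameter (several for flattened or expanded aggregates, none for
// inalloca arguments, which are instead reached through the trailing
// inalloca pointer).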
1578
1579
1580
1583 return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1584}
1585
1589}
1590
1594}
1595
1598 switch (BT->getKind()) {
1599 default:
1600 return false;
1601 case BuiltinType::Float:
1603 case BuiltinType::Double:
1605 case BuiltinType::LongDouble:
1607 }
1608 }
1609
1610 return false;
1611}
1612
1616 if (BT->getKind() == BuiltinType::LongDouble)
1618 }
1619 }
1620
1621 return false;
1622}
1623
1627}
1628
1629llvm::FunctionType *
1631
1632 bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1633 (void)Inserted;
1634 assert(Inserted && "Recursively being processed?");
1635
1636 llvm::Type *resultType = nullptr;
1638 switch (retAI.getKind()) {
1641 llvm_unreachable("Invalid ABI kind for return argument");
1642
1646 break;
1647
1650
1653 resultType = llvm::PointerType::get(getLLVMContext(), addressSpace);
1654 } else {
1655 resultType = llvm::Type::getVoidTy(getLLVMContext());
1656 }
1657 break;
1658
1661 resultType = llvm::Type::getVoidTy(getLLVMContext());
1662 break;
1663
1666 break;
1667 }
1668
1669 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1671
1672
1673 if (IRFunctionArgs.hasSRetArg()) {
1676 ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1677 llvm::PointerType::get(getLLVMContext(), AddressSpace);
1678 }
1679
1680
1681 if (IRFunctionArgs.hasInallocaArg())
1682 ArgTypes[IRFunctionArgs.getInallocaArgNo()] =
1684
1685
1686 unsigned ArgNo = 0;
1689 for (; it != ie; ++it, ++ArgNo) {
1691
1692
1693 if (IRFunctionArgs.hasPaddingArg(ArgNo))
1694 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1696
1697 unsigned FirstIRArg, NumIRArgs;
1698 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1699
1700 switch (ArgInfo.getKind()) {
1703 assert(NumIRArgs == 0);
1704 break;
1705
1707 assert(NumIRArgs == 1);
1708
1709 ArgTypes[FirstIRArg] = llvm::PointerType::get(
1711 break;
1713 assert(NumIRArgs == 1);
1714 ArgTypes[FirstIRArg] = llvm::PointerType::get(
1716 break;
1719
1720
1722 llvm::StructType *st = dyn_castllvm::StructType(argType);
1724 assert(NumIRArgs == st->getNumElements());
1725 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1726 ArgTypes[FirstIRArg + i] = st->getElementType(i);
1727 } else {
1728 assert(NumIRArgs == 1);
1729 ArgTypes[FirstIRArg] = argType;
1730 }
1731 break;
1732 }
1733
1735 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1737 *ArgTypesIter++ = EltTy;
1738 }
1739 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1740 break;
1741 }
1742
1744 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1746 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1747 break;
1748 }
1749 }
1750
1751 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1752 assert(Erased && "Not in set?");
1753
1754 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1755}
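// Summarizing the cases above: Ignore and InAlloca contribute no IR
// parameter of their own, Indirect and IndirectAliased contribute a single
// pointer, Direct/Extend contribute one argument of the coerced type (or one
// per element when the coerced struct can be flattened), and
// Expand/CoerceAndExpand contribute one argument per expanded piece. The
// same ClangToLLVMArgMapping is reused when attributes are attached and when
// calls are emitted.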
1756
1760
1763
1765}
1766
1768 llvm::AttrBuilder &FuncAttrs,
1770 if (!FPT)
1771 return;
1772
1775 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1776
1779 FuncAttrs.addAttribute("aarch64_pstate_sm_enabled");
1781 FuncAttrs.addAttribute("aarch64_pstate_sm_compatible");
1783 FuncAttrs.addAttribute("aarch64_za_state_agnostic");
1784
1785
1787 FuncAttrs.addAttribute("aarch64_preserves_za");
1789 FuncAttrs.addAttribute("aarch64_in_za");
1791 FuncAttrs.addAttribute("aarch64_out_za");
1793 FuncAttrs.addAttribute("aarch64_inout_za");
1794
1795
1797 FuncAttrs.addAttribute("aarch64_preserves_zt0");
1799 FuncAttrs.addAttribute("aarch64_in_zt0");
1801 FuncAttrs.addAttribute("aarch64_out_zt0");
1803 FuncAttrs.addAttribute("aarch64_inout_zt0");
1804}
1805
1807 const Decl *Callee) {
1808 if (!Callee)
1809 return;
1810
1812
1813 for (const OMPAssumeAttr *AA : Callee->specific_attrs())
1814 AA->getAssumption().split(Attrs, ",");
1815
1816 if (!Attrs.empty())
1817 FuncAttrs.addAttribute(llvm::AssumptionAttrKey,
1818 llvm::join(Attrs.begin(), Attrs.end(), ","));
1819}
1820
1822 QualType ReturnType) const {
1823
1824
1827 if (const auto *ClassDecl = dyn_cast(RT->getDecl()))
1828 return ClassDecl->hasTrivialDestructor();
1829 }
1831}
1832
1834 const Decl *TargetDecl) {
1835
1836
1837
1838
1839
1840 if (Module.getLangOpts().Sanitize.has(SanitizerKind::Memory))
1841 return true;
1842
1843
1844 if (.getLangOpts().CPlusPlus)
1845 return false;
1846 if (TargetDecl) {
1847 if (const FunctionDecl *FDecl = dyn_cast(TargetDecl)) {
1848 if (FDecl->isExternC())
1849 return false;
1850 } else if (const VarDecl *VDecl = dyn_cast(TargetDecl)) {
1851
1852 if (VDecl->isExternC())
1853 return false;
1854 }
1855 }
1856
1857
1858
1859
1860 return Module.getCodeGenOpts().StrictReturn ||
1861 .MayDropFunctionReturn(Module.getContext(), RetTy) ||
1862 Module.getLangOpts().Sanitize.has(SanitizerKind::Return);
1863}
1864
1865
1866
1867
1869 llvm::DenormalMode FP32DenormalMode,
1870 llvm::AttrBuilder &FuncAttrs) {
1871 if (FPDenormalMode != llvm::DenormalMode::getDefault())
1872 FuncAttrs.addAttribute("denormal-fp-math", FPDenormalMode.str());
1873
1874 if (FP32DenormalMode != FPDenormalMode && FP32DenormalMode.isValid())
1875 FuncAttrs.addAttribute("denormal-fp-math-f32", FP32DenormalMode.str());
1876}
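// For example, compiling with -fdenormal-fp-math=preserve-sign typically
// yields function attributes along the lines of
//
//   attributes #0 = { ... "denormal-fp-math"="preserve-sign,preserve-sign" }
//
// with a separate "denormal-fp-math-f32" entry added only when the
// float-specific mode is valid and differs from the general one, mirroring
// the checks above.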
1877
1878
1879
1880
1881static void
1883 llvm::AttrBuilder &FuncAttrs) {
1885 FuncAttrs);
1886}
1887
1889 StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts,
1890 const LangOptions &LangOpts, bool AttrOnCallSite,
1891 llvm::AttrBuilder &FuncAttrs) {
1892
1893 if (!HasOptnone) {
1894 if (CodeGenOpts.OptimizeSize)
1895 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1896 if (CodeGenOpts.OptimizeSize == 2)
1897 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1898 }
1899
1900 if (CodeGenOpts.DisableRedZone)
1901 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1902 if (CodeGenOpts.IndirectTlsSegRefs)
1903 FuncAttrs.addAttribute("indirect-tls-seg-refs");
1904 if (CodeGenOpts.NoImplicitFloat)
1905 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1906
1907 if (AttrOnCallSite) {
1908
1909
1910
1911 if (!CodeGenOpts.SimplifyLibCalls || LangOpts.isNoBuiltinFunc(Name))
1912 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1914 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1915 } else {
1916 switch (CodeGenOpts.getFramePointer()) {
1918
1919 break;
1923 FuncAttrs.addAttribute("frame-pointer",
1925 CodeGenOpts.getFramePointer()));
1926 }
1927
1928 if (CodeGenOpts.LessPreciseFPMAD)
1929 FuncAttrs.addAttribute("less-precise-fpmad", "true");
1930
1931 if (CodeGenOpts.NullPointerIsValid)
1932 FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
1933
1935 FuncAttrs.addAttribute("no-trapping-math", "true");
1936
1937
1938
1939 if (LangOpts.NoHonorInfs)
1940 FuncAttrs.addAttribute("no-infs-fp-math", "true");
1941 if (LangOpts.NoHonorNaNs)
1942 FuncAttrs.addAttribute("no-nans-fp-math", "true");
1943 if (LangOpts.ApproxFunc)
1944 FuncAttrs.addAttribute("approx-func-fp-math", "true");
1945 if (LangOpts.AllowFPReassoc && LangOpts.AllowRecip &&
1946 LangOpts.NoSignedZero && LangOpts.ApproxFunc &&
1947 (LangOpts.getDefaultFPContractMode() ==
1949 LangOpts.getDefaultFPContractMode() ==
1951 FuncAttrs.addAttribute("unsafe-fp-math", "true");
1952 if (CodeGenOpts.SoftFloat)
1953 FuncAttrs.addAttribute("use-soft-float", "true");
1954 FuncAttrs.addAttribute("stack-protector-buffer-size",
1955 llvm::utostr(CodeGenOpts.SSPBufferSize));
1956 if (LangOpts.NoSignedZero)
1957 FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true");
1958
1959
1960 const std::vectorstd::string &Recips = CodeGenOpts.Reciprocals;
1961 if (!Recips.empty())
1962 FuncAttrs.addAttribute("reciprocal-estimates",
1963 llvm::join(Recips, ","));
1964
1967 FuncAttrs.addAttribute("prefer-vector-width",
1969
1970 if (CodeGenOpts.StackRealignment)
1971 FuncAttrs.addAttribute("stackrealign");
1972 if (CodeGenOpts.Backchain)
1973 FuncAttrs.addAttribute("backchain");
1974 if (CodeGenOpts.EnableSegmentedStacks)
1975 FuncAttrs.addAttribute("split-stack");
1976
1977 if (CodeGenOpts.SpeculativeLoadHardening)
1978 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1979
1980
    switch (CodeGenOpts.getZeroCallUsedRegs()) {
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip:
      FuncAttrs.removeAttribute("zero-call-used-regs");
      break;
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr-arg");
      break;
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr");
      break;
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "used-arg");
      break;
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used:
      FuncAttrs.addAttribute("zero-call-used-regs", "used");
      break;
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr-arg");
      break;
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr");
      break;
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg:
      FuncAttrs.addAttribute("zero-call-used-regs", "all-arg");
      break;
    case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All:
      FuncAttrs.addAttribute("zero-call-used-regs", "all");
      break;
    }
2010 }
2011
2013
2014
2015
2016
2017
2018 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2019 }
2020
2021
2022
2023 if ((LangOpts.CUDA && LangOpts.CUDAIsDevice) || LangOpts.OpenCL ||
2024 LangOpts.SYCLIsDevice) {
2025 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2026 }
2027
2028 if (CodeGenOpts.SaveRegParams && !AttrOnCallSite)
2029 FuncAttrs.addAttribute("save-reg-params");
2030
2032 StringRef Var, Value;
2033 std::tie(Var, Value) = Attr.split('=');
2034 FuncAttrs.addAttribute(Var, Value);
2035 }
2036
2039}
2040
2041
2042
2043
2044
2045
2046static void
2048 const llvm::Function &F,
2050 auto FFeatures = F.getFnAttribute("target-features");
2051
2052 llvm::StringSet<> MergedNames;
2054 MergedFeatures.reserve(TargetOpts.Features.size());
2055
2056 auto AddUnmergedFeatures = [&](auto &&FeatureRange) {
2057 for (StringRef Feature : FeatureRange) {
2058 if (Feature.empty())
2059 continue;
2060 assert(Feature[0] == '+' || Feature[0] == '-');
2061 StringRef Name = Feature.drop_front(1);
2062 bool Merged = !MergedNames.insert(Name).second;
2063 if (!Merged)
2064 MergedFeatures.push_back(Feature);
2065 }
2066 };
2067
2068 if (FFeatures.isValid())
2069 AddUnmergedFeatures(llvm::split(FFeatures.getValueAsString(), ','));
2070 AddUnmergedFeatures(TargetOpts.Features);
2071
2072 if (!MergedFeatures.empty()) {
2073 llvm::sort(MergedFeatures);
2074 FuncAttr.addAttribute("target-features", llvm::join(MergedFeatures, ","));
2075 }
2076}
2077
2079 llvm::Function &F, const CodeGenOptions &CodeGenOpts,
2081 bool WillInternalize) {
2082
2083 llvm::AttrBuilder FuncAttrs(F.getContext());
2084
2085
2086 if (!TargetOpts.CPU.empty())
2087 FuncAttrs.addAttribute("target-cpu", TargetOpts.CPU);
2088 if (!TargetOpts.TuneCPU.empty())
2089 FuncAttrs.addAttribute("tune-cpu", TargetOpts.TuneCPU);
2090
2092 CodeGenOpts, LangOpts,
2093 false, FuncAttrs);
2094
2095 if (!WillInternalize && F.isInterposable()) {
2096
2097
2098
2099
2100 F.addFnAttrs(FuncAttrs);
2101 return;
2102 }
2103
2104 llvm::AttributeMask AttrsToRemove;
2105
2106 llvm::DenormalMode DenormModeToMerge = F.getDenormalModeRaw();
2107 llvm::DenormalMode DenormModeToMergeF32 = F.getDenormalModeF32Raw();
2108 llvm::DenormalMode Merged =
2109 CodeGenOpts.FPDenormalMode.mergeCalleeMode(DenormModeToMerge);
2110 llvm::DenormalMode MergedF32 = CodeGenOpts.FP32DenormalMode;
2111
2112 if (DenormModeToMergeF32.isValid()) {
2113 MergedF32 =
2114 CodeGenOpts.FP32DenormalMode.mergeCalleeMode(DenormModeToMergeF32);
2115 }
2116
2117 if (Merged == llvm::DenormalMode::getDefault()) {
2118 AttrsToRemove.addAttribute("denormal-fp-math");
2119 } else if (Merged != DenormModeToMerge) {
2120
2121 FuncAttrs.addAttribute("denormal-fp-math",
2123 }
2124
2125 if (MergedF32 == llvm::DenormalMode::getDefault()) {
2126 AttrsToRemove.addAttribute("denormal-fp-math-f32");
2127 } else if (MergedF32 != DenormModeToMergeF32) {
2128
2129 FuncAttrs.addAttribute("denormal-fp-math-f32",
2131 }
2132
2133 F.removeFnAttrs(AttrsToRemove);
2135
2137
2138 F.addFnAttrs(FuncAttrs);
2139}
2140
2141void CodeGenModule::getTrivialDefaultFunctionAttributes(
2142 StringRef Name, bool HasOptnone, bool AttrOnCallSite,
2143 llvm::AttrBuilder &FuncAttrs) {
2144 ::getTrivialDefaultFunctionAttributes(Name, HasOptnone, getCodeGenOpts(),
2146 FuncAttrs);
2147}
2148
2149void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
2150 bool HasOptnone,
2151 bool AttrOnCallSite,
2152 llvm::AttrBuilder &FuncAttrs) {
2153 getTrivialDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite,
2154 FuncAttrs);
2155
2156
2157 if (!AttrOnCallSite)
2159}
2160
2162 llvm::AttrBuilder &attrs) {
2163 getDefaultFunctionAttributes( "", false,
2164 false, attrs);
2165 GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
2166}
2167
2170 const NoBuiltinAttr *NBA = nullptr) {
2171 auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
2173 AttributeName += "no-builtin-";
2174 AttributeName += BuiltinName;
2175 FuncAttrs.addAttribute(AttributeName);
2176 };
2177
2178
2179 if (LangOpts.NoBuiltin) {
2180
2181 FuncAttrs.addAttribute("no-builtins");
2182 return;
2183 }
2184
2185
2186 llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr);
2187
2188
2189
2190 if (!NBA)
2191 return;
2192
2193
2194
2195 if (llvm::is_contained(NBA->builtinNames(), "*")) {
2196 FuncAttrs.addAttribute("no-builtins");
2197 return;
2198 }
2199
2200
2201 llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
2202}
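// Example of the resulting attributes: -fno-builtin produces the blanket
// "no-builtins" function attribute, while
//
//   __attribute__((no_builtin("memcpy"))) void f(void);
//
// produces the per-function "no-builtin-memcpy" attribute, and a "*" entry
// in the attribute falls back to "no-builtins", as handled above.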
2203
2205 const llvm::DataLayout &DL, const ABIArgInfo &AI,
2206 bool CheckCoerce = true) {
2207 llvm::Type *Ty = Types.ConvertTypeForMem(QTy);
2210 return true;
2212 return true;
2213 if (!DL.typeSizeEqualsStoreSize(Ty))
2214
2215
2216
2217 return false;
2220 if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
2221 DL.getTypeSizeInBits(Ty)))
2222
2223
2224
2225
2226 return false;
2227 }
2229 return true;
2231 return true;
2233 return false;
2235
2236
2237 return false;
2241 return true;
2242 }
2245 if (const MatrixType *Matrix = dyn_cast(QTy))
2246 return DetermineNoUndef(Matrix->getElementType(), Types, DL, AI, false);
2247 if (const ArrayType *Array = dyn_cast(QTy))
2248 return DetermineNoUndef(Array->getElementType(), Types, DL, AI, false);
2249
2250
2251 return false;
2252}
2253
2254
2256 unsigned NumRequiredArgs, unsigned ArgNo) {
2257 const auto *FD = dyn_cast_or_null(TargetDecl);
2258 if (!FD)
2259 return false;
2260
2261
2262 if (ArgNo >= NumRequiredArgs)
2263 return false;
2264
2265
2266 if (ArgNo < FD->getNumParams()) {
2267 const ParmVarDecl *Param = FD->getParamDecl(ArgNo);
2268 if (Param && Param->hasAttr())
2269 return true;
2270 }
2271
2272 return false;
2273}
2274
2275
2276
2278 bool IsReturn) {
2279
2281 return false;
2282
2283
2285 if (llvm::AttributeFuncs::isNoFPClassCompatibleType(IRTy))
2286 return true;
2287
2288 if (llvm::StructType *ST = dyn_castllvm::StructType(IRTy)) {
2290 llvm::all_of(ST->elements(), [](llvm::Type *Ty) {
2291 return llvm::AttributeFuncs::isNoFPClassCompatibleType(Ty);
2292 });
2293 }
2294
2295 return false;
2296}
2297
/// Build the fp-class mask ruled out by the current fast-math language
/// options, for use with the nofpclass attribute.
static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts) {
  llvm::FPClassTest Mask = llvm::fcNone;
  if (LangOpts.NoHonorInfs)
    Mask |= llvm::fcInf;
  if (LangOpts.NoHonorNaNs)
    Mask |= llvm::fcNan;
  return Mask;
}
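// This mask feeds the nofpclass() parameter and return attributes: with
// -ffinite-math-only (NoHonorInfs and NoHonorNaNs both set), a float
// argument is typically emitted as something like
//
//   define float @f(float nofpclass(nan inf) %x)
//
// telling the optimizer those value classes cannot occur.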
2307
2310 llvm::AttributeList &Attrs) {
2311 if (Attrs.getMemoryEffects().getModRef() == llvm::ModRefInfo::NoModRef) {
2312 Attrs = Attrs.removeFnAttribute(getLLVMContext(), llvm::Attribute::Memory);
2313 llvm::Attribute MemoryAttr = llvm::Attribute::getWithMemoryEffects(
2314 getLLVMContext(), llvm::MemoryEffects::writeOnly());
2315 Attrs = Attrs.addFnAttribute(getLLVMContext(), MemoryAttr);
2316 }
2317}
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2339 llvm::AttributeList &AttrList,
2341 bool AttrOnCallSite, bool IsThunk) {
2344
2345
2346
2349 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2351 FuncAttrs.addAttribute("cmse_nonsecure_call");
2352
2353
2356
2358
2359
2360
2362
2363 bool HasOptnone = false;
2364
2365 const NoBuiltinAttr *NBA = nullptr;
2366
2367
2368
2369 auto AddPotentialArgAccess = [&]() {
2370 llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory);
2371 if (A.isValid())
2372 FuncAttrs.addMemoryAttr(A.getMemoryEffects() |
2373 llvm::MemoryEffects::argMemOnly());
2374 };
2375
2376
2377
2378
2379 if (TargetDecl) {
2380 if (TargetDecl->hasAttr())
2381 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2382 if (TargetDecl->hasAttr())
2383 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2384 if (TargetDecl->hasAttr())
2385 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2386 if (TargetDecl->hasAttr())
2387 FuncAttrs.addAttribute(llvm::Attribute::Cold);
2388 if (TargetDecl->hasAttr())
2389 FuncAttrs.addAttribute(llvm::Attribute::Hot);
2390 if (TargetDecl->hasAttr())
2391 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
2392 if (TargetDecl->hasAttr())
2393 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2394
2395 if (const FunctionDecl *Fn = dyn_cast(TargetDecl)) {
2398 if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
2399
2400 auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
2402 (Kind == OO_New || Kind == OO_Array_New))
2403 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2404 }
2405 const CXXMethodDecl *MD = dyn_cast(Fn);
2406 const bool IsVirtualCall = MD && MD->isVirtual();
2407
2408
2409 if (!(AttrOnCallSite && IsVirtualCall)) {
2410 if (Fn->isNoReturn())
2411 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2412 NBA = Fn->getAttr();
2413 }
2414 }
2415
2416 if (isa(TargetDecl) || isa(TargetDecl)) {
2417
2418
2419 if (AttrOnCallSite && TargetDecl->hasAttr())
2420 FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
2421 }
2422
2423
2424 if (TargetDecl->hasAttr()) {
2425 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::none());
2426 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2427
2428
2429 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2430 } else if (TargetDecl->hasAttr()) {
2431 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
2432 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2433
2434 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2435 } else if (TargetDecl->hasAttr()) {
2436 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::inaccessibleOrArgMemOnly());
2437 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2438 }
2439 if (TargetDecl->hasAttr())
2440 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2441 if (TargetDecl->hasAttr() &&
2442 !CodeGenOpts.NullPointerIsValid)
2443 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2444 if (TargetDecl->hasAttr())
2445 FuncAttrs.addAttribute("no_caller_saved_registers");
2446 if (TargetDecl->hasAttr())
2447 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
2448 if (TargetDecl->hasAttr())
2449 FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
2450 if (TargetDecl->hasAttr())
2451 FuncAttrs.addAttribute("bpf_fastcall");
2452
2453 HasOptnone = TargetDecl->hasAttr();
2454 if (auto *AllocSize = TargetDecl->getAttr()) {
2455 std::optional NumElemsParam;
2456 if (AllocSize->getNumElemsParam().isValid())
2457 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2458 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2459 NumElemsParam);
2460 }
2461
2462 if (TargetDecl->hasAttr()) {
2463 if (getLangOpts().OpenCLVersion <= 120) {
2464
2465 FuncAttrs.addAttribute("uniform-work-group-size", "true");
2466 } else {
2467
2468
2469
2470
2471
2472 FuncAttrs.addAttribute(
2473 "uniform-work-group-size",
2474 llvm::toStringRef(getLangOpts().OffloadUniformBlock));
2475 }
2476 }
2477
2478 if (TargetDecl->hasAttr() &&
2480 FuncAttrs.addAttribute("uniform-work-group-size", "true");
2481
2482 if (TargetDecl->hasAttr())
2483 FuncAttrs.addAttribute("aarch64_pstate_sm_body");
2484 }
2485
2486
2487
2488
2489
2490
2491
2493
2494
2495 getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2496
2497
2498
2499 if (TargetDecl) {
2500 if (TargetDecl->hasAttr())
2501 FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2502 if (TargetDecl->hasAttr())
2503 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2504 if (TargetDecl->hasAttr())
2505 FuncAttrs.removeAttribute("split-stack");
2506 if (TargetDecl->hasAttr()) {
2507
2508 auto Kind =
2509 TargetDecl->getAttr()->getZeroCallUsedRegs();
2510 FuncAttrs.removeAttribute("zero-call-used-regs");
2511 FuncAttrs.addAttribute(
2512 "zero-call-used-regs",
2513 ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind));
2514 }
2515
2516
2517
2518
2519
2520 if (CodeGenOpts.NoPLT) {
2521 if (auto *Fn = dyn_cast(TargetDecl)) {
2522 if (!Fn->isDefined() && !AttrOnCallSite) {
2523 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2524 }
2525 }
2526 }
2527
2528 if (TargetDecl->hasAttr())
2529 FuncAttrs.removeAttribute(llvm::Attribute::Convergent);
2530 }
2531
2532
2533
2534 if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
2535 if (const auto *FD = dyn_cast_or_null(TargetDecl)) {
2536 if (!FD->isExternallyVisible())
2537 FuncAttrs.addAttribute("sample-profile-suffix-elision-policy",
2538 "selected");
2539 }
2540 }
2541
2542
2543
2544 if (!AttrOnCallSite) {
2545 if (TargetDecl && TargetDecl->hasAttr())
2546 FuncAttrs.addAttribute("cmse_nonsecure_entry");
2547
2548
2549 auto shouldDisableTailCalls = [&] {
2550
2551 if (CodeGenOpts.DisableTailCalls)
2552 return true;
2553
2554 if (!TargetDecl)
2555 return false;
2556
2557 if (TargetDecl->hasAttr() ||
2558 TargetDecl->hasAttr())
2559 return true;
2560
2561 if (CodeGenOpts.NoEscapingBlockTailCalls) {
2562 if (const auto *BD = dyn_cast(TargetDecl))
2563 if (!BD->doesNotEscape())
2564 return true;
2565 }
2566
2567 return false;
2568 };
2569 if (shouldDisableTailCalls())
2570 FuncAttrs.addAttribute("disable-tail-calls", "true");
2571
2572
2573
2574 GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
2575 }
2576
2577
2578 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
2579
2583
2584
2585 if (CodeGenOpts.EnableNoundefAttrs &&
2589 RetAttrs.addAttribute(llvm::Attribute::NoUndef);
2590 }
2591
2592 switch (RetAI.getKind()) {
2595 RetAttrs.addAttribute(llvm::Attribute::SExt);
2597 RetAttrs.addAttribute(llvm::Attribute::ZExt);
2598 else
2599 RetAttrs.addAttribute(llvm::Attribute::NoExt);
2600 [[fallthrough]];
2603 RetAttrs.addAttribute(llvm::Attribute::InReg);
2604
2607
2608 break;
2610 break;
2611
2614
2615 AddPotentialArgAccess();
2616 break;
2617 }
2618
2620 break;
2621
2624 llvm_unreachable("Invalid ABI kind for return argument");
2625 }
2626
2627 if (!IsThunk) {
2628
2632 RetAttrs.addDereferenceableAttr(
2634 if (getTypes().getTargetAddressSpace(PTy) == 0 &&
2635 !CodeGenOpts.NullPointerIsValid)
2636 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2638 llvm::Align Alignment =
2640 RetAttrs.addAlignmentAttr(Alignment);
2641 }
2642 }
2643 }
2644
2645 bool hasUsedSRet = false;
2647
2648
2649 if (IRFunctionArgs.hasSRetArg()) {
2651 SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
2652 SRETAttrs.addAttribute(llvm::Attribute::Writable);
2653 SRETAttrs.addAttribute(llvm::Attribute::DeadOnUnwind);
2654 hasUsedSRet = true;
2656 SRETAttrs.addAttribute(llvm::Attribute::InReg);
2658 ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2659 llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2660 }
2661
2662
2663 if (IRFunctionArgs.hasInallocaArg()) {
2666 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2668 }
2669
2670
2671
2672
2673 if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() &&
2674 !FI.arg_begin()->type->isVoidPointerType() && !IsThunk) {
2675 auto IRArgs = IRFunctionArgs.getIRArgs(0);
2676
2677 assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");
2678
2680
2683
2684 if (!CodeGenOpts.NullPointerIsValid &&
2686 Attrs.addAttribute(llvm::Attribute::NonNull);
2688 } else {
2689
2690
2691
2692
2693 Attrs.addDereferenceableOrNullAttr(
2697 }
2698
2699 llvm::Align Alignment =
2701 nullptr, true)
2703 Attrs.addAlignmentAttr(Alignment);
2704
2705 ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs);
2706 }
2707
2708 unsigned ArgNo = 0;
2711 I != E; ++I, ++ArgNo) {
2712 QualType ParamType = I->type;
2715
2716
2717 if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2719 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2720 llvm::AttributeSet::get(
2722 llvm::AttrBuilder(getLLVMContext()).addAttribute(llvm::Attribute::InReg));
2723 }
2724 }
2725
2726
2727 if (CodeGenOpts.EnableNoundefAttrs &&
2729 Attrs.addAttribute(llvm::Attribute::NoUndef);
2730 }
2731
2732
2733
2734
2738 Attrs.addAttribute(llvm::Attribute::SExt);
2740 Attrs.addAttribute(llvm::Attribute::ZExt);
2741 else
2742 Attrs.addAttribute(llvm::Attribute::NoExt);
2743 [[fallthrough]];
2746 Attrs.addAttribute(llvm::Attribute::Nest);
2748 Attrs.addAttribute(llvm::Attribute::InReg);
2749 Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign()));
2750
2753 break;
2756 Attrs.addAttribute(llvm::Attribute::InReg);
2757
2759 Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
2760
2762 if (CodeGenOpts.PassByValueIsNoAlias && Decl &&
2763 Decl->getArgPassingRestrictions() ==
2765
2766
2767 Attrs.addAttribute(llvm::Attribute::NoAlias);
2768
2769
2770
2771
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784 assert(!Align.isZero());
2785
2786
2787
2789 Attrs.addAlignmentAttr(Align.getQuantity());
2790
2791
2792 AddPotentialArgAccess();
2793 break;
2794 }
2797 Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType));
2798 Attrs.addAlignmentAttr(Align.getQuantity());
2799 break;
2800 }
2804 break;
2805
2807
2808 AddPotentialArgAccess();
2809 continue;
2810 }
2811
2815 Attrs.addDereferenceableAttr(
2817 if (getTypes().getTargetAddressSpace(PTy) == 0 &&
2818 !CodeGenOpts.NullPointerIsValid)
2819 Attrs.addAttribute(llvm::Attribute::NonNull);
2821 llvm::Align Alignment =
2823 Attrs.addAlignmentAttr(Alignment);
2824 }
2825 }
2826
2827
2828
2829
2830
2831 if (TargetDecl && TargetDecl->hasAttr() &&
2835 llvm::Align Alignment =
2837 Attrs.addAlignmentAttr(Alignment);
2838 }
2839 }
2840
2844 Attrs.addAttribute(llvm::Attribute::NoAlias);
2845 break;
2847 break;
2848
2850
2851
2852 if (!hasUsedSRet && RetTy->isVoidType()) {
2853 Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType));
2854 hasUsedSRet = true;
2855 }
2856
2857
2858 Attrs.addAttribute(llvm::Attribute::NoAlias);
2859
2860
2862 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2864 Attrs.addDereferenceableAttr(info.Width.getQuantity());
2865 Attrs.addAlignmentAttr(info.Align.getAsAlign());
2866 }
2867 break;
2868 }
2869
2871 Attrs.addAttribute(llvm::Attribute::SwiftError);
2872 break;
2873
2875 Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2876 break;
2877
2879 Attrs.addAttribute(llvm::Attribute::SwiftAsync);
2880 break;
2881 }
2882
2884 Attrs.addAttribute(llvm::Attribute::NoCapture);
2885
2886 if (Attrs.hasAttributes()) {
2887 unsigned FirstIRArg, NumIRArgs;
2888 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2889 for (unsigned i = 0; i < NumIRArgs; i++)
2890 ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes(
2892 }
2893 }
2894 assert(ArgNo == FI.arg_size());
2895
2896 AttrList = llvm::AttributeList::get(
2898 llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2899}
2900
/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}
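// Demotion is needed for K&R-style definitions, where the default argument
// promotions apply to the caller-visible type. For example:
//
//   void f(c) char c; { ... }
//
// receives its argument promoted to int, and the trunc above recovers the
// declared char value; CreateFPCast handles the corresponding float->double
// promotion case.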
2920
2921
2922
2924 QualType ArgType, unsigned ArgNo) {
2925
2926
2927
2928
2929
2930
2931
2933 return nullptr;
2934
2935 if (PVD) {
2936 if (auto ParmNNAttr = PVD->getAttr())
2937 return ParmNNAttr;
2938 }
2939
2940 if (!FD)
2941 return nullptr;
2942 for (const auto *NNAttr : FD->specific_attrs()) {
2943 if (NNAttr->isNonNull(ArgNo))
2944 return NNAttr;
2945 }
2946 return nullptr;
2947}
2948
namespace {
  struct CopyBackSwiftError final : EHScopeStack::Cleanup {
    Address Temp;
    Address Arg;
    CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
    void Emit(CodeGenFunction &CGF, Flags flags) override {
      llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
      CGF.Builder.CreateStore(errorValue, Arg);
    }
  };
}
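// This cleanup implements the copy-back half of the swifterror convention:
// the caller passes a pointer to its error variable, the callee works on a
// local temporary that can be kept in a register, and on every function exit
// the temporary's value is stored back to the caller-provided slot, which is
// what Emit() above does.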
2960
2962 llvm::Function *Fn,
2965
2966 return;
2967
2968
2969
2970
2971
2973 if (FD->hasImplicitReturnZero()) {
2974 QualType RetTy = FD->getReturnType().getUnqualifiedType();
2976 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2978 }
2979 }
2980
2981
2982
2983
2984 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2985 assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
2986
2987
2988
2990 if (IRFunctionArgs.hasInallocaArg())
2991 ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
2993
2994
2995 if (IRFunctionArgs.hasSRetArg()) {
2996 auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
2997 AI->setName("agg.result");
2998 AI->addAttr(llvm::Attribute::NoAlias);
2999 }
3000
3001
3002
3003
3005 ArgVals.reserve(Args.size());
3006
3007
3008
3009
3010
3011 assert(FI.arg_size() == Args.size() &&
3012 "Mismatch between function signature & arguments.");
3013 unsigned ArgNo = 0;
3015 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
3016 i != e; ++i, ++info_it, ++ArgNo) {
3017 const VarDecl *Arg = *i;
3019
3020 bool isPromoted =
3021 isa(Arg) && cast(Arg)->isKNRPromoted();
3022
3023
3024
3028
3029 unsigned FirstIRArg, NumIRArgs;
3030 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3031
3032 switch (ArgI.getKind()) {
3034 assert(NumIRArgs == 0);
3040 getContext().getTypeAlignInChars(Ty));
3042 break;
3043 }
3044
3047 assert(NumIRArgs == 1);
3049 Fn->getArg(FirstIRArg), Ty, ArgI.getIndirectAlign(), false, nullptr,
3051
3053
3054
3055
3056
3059
3060
3061
3062
3063
3064
3070 llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
3071 ParamAddr = AlignedTemp;
3072 }
3074 } else {
3075
3076 llvm::Value *V =
3078
3079 if (isPromoted)
3082 }
3083 break;
3084 }
3085
3088 auto AI = Fn->getArg(FirstIRArg);
3090
3091
3092
3093
3094 if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() &&
3096 assert(NumIRArgs == 1);
3097
3098 if (const ParmVarDecl *PVD = dyn_cast(Arg)) {
3099
3101 PVD->getFunctionScopeIndex()) &&
3103 AI->addAttr(llvm::Attribute::NonNull);
3104
3105 QualType OTy = PVD->getOriginalType();
3106 if (const auto *ArrTy =
3107 getContext().getAsConstantArrayType(OTy)) {
3108
3109
3110
3111
3113 QualType ETy = ArrTy->getElementType();
3114 llvm::Align Alignment =
3116 AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
3117 uint64_t ArrSize = ArrTy->getZExtSize();
3119 ArrSize) {
3121 Attrs.addDereferenceableAttr(
3122 getContext().getTypeSizeInChars(ETy).getQuantity() *
3123 ArrSize);
3124 AI->addAttrs(Attrs);
3125 } else if (getContext().getTargetInfo().getNullPointerValue(
3128 AI->addAttr(llvm::Attribute::NonNull);
3129 }
3130 }
3131 } else if (const auto *ArrTy =
3132 getContext().getAsVariableArrayType(OTy)) {
3133
3134
3135
3137 QualType ETy = ArrTy->getElementType();
3138 llvm::Align Alignment =
3140 AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
3141 if (().getTargetAddressSpace(ETy) &&
3143 AI->addAttr(llvm::Attribute::NonNull);
3144 }
3145 }
3146
3147
3148 const auto *AVAttr = PVD->getAttr();
3149 if (!AVAttr)
3151 AVAttr = TOTy->getDecl()->getAttr();
3152 if (AVAttr && .has(SanitizerKind::Alignment)) {
3153
3154
3155
3156 llvm::ConstantInt *AlignmentCI =
3157 castllvm::ConstantInt(EmitScalarExpr(AVAttr->getAlignment()));
3159 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
3160 if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
3161 AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
3162 AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(
3163 llvm::Align(AlignmentInt)));
3164 }
3165 }
3166 }
3167
3168
3170 AI->addAttr(llvm::Attribute::NoAlias);
3171 }
3172
3173
3174
3178 assert(NumIRArgs == 1);
3179
3180
3181
3182 llvm::Value *V = AI;
3190 V, pointeeTy, getContext().getTypeAlignInChars(pointeeTy));
3194
3195
3196
3197
3199 }
3200
3201
3204
3205 if (isPromoted)
3207
3208
3209
3210
3211
3213 if (V->getType() != LTy)
3214 V = Builder.CreateBitCast(V, LTy);
3215
3217 break;
3218 }
3219
3220
3221
3222
3223
3224 if (auto *VecTyTo = dyn_castllvm::FixedVectorType(ConvertType(Ty))) {
3225 llvm::Value *ArgVal = Fn->getArg(FirstIRArg);
3226 if (auto *VecTyFrom =
3227 dyn_castllvm::ScalableVectorType(ArgVal->getType())) {
3229 *this, VecTyTo, VecTyFrom, ArgVal, Arg->getName());
3230 if (Extracted) {
3231 assert(NumIRArgs == 1);
3233 break;
3234 }
3235 }
3236 }
3237
3238 llvm::StructType *STy =
3242
3243
3245
3246
3247
3249 STy->getNumElements() > 1) {
3250 llvm::TypeSize StructSize = CGM.getDataLayout().getTypeAllocSize(STy);
3251 llvm::TypeSize PtrElementSize =
3253 if (StructSize.isScalable()) {
3254 assert(STy->containsHomogeneousScalableVectorTypes() &&
3255 "ABI only supports structure with homogeneous scalable vector "
3256 "type");
3257 assert(StructSize == PtrElementSize &&
3258 "Only allow non-fractional movement of structure with"
3259 "homogeneous scalable vector type");
3260 assert(STy->getNumElements() == NumIRArgs);
3261
3262 llvm::Value *LoadedStructValue = llvm::PoisonValue::get(STy);
3263 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3264 auto *AI = Fn->getArg(FirstIRArg + i);
3265 AI->setName(Arg->getName() + ".coerce" + Twine(i));
3266 LoadedStructValue =
3267 Builder.CreateInsertValue(LoadedStructValue, AI, i);
3268 }
3269
3271 } else {
3272 uint64_t SrcSize = StructSize.getFixedValue();
3273 uint64_t DstSize = PtrElementSize.getFixedValue();
3274
3276 if (SrcSize <= DstSize) {
3278 } else {
3279 AddrToStoreInto =
3281 }
3282
3283 assert(STy->getNumElements() == NumIRArgs);
3284 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3285 auto AI = Fn->getArg(FirstIRArg + i);
3286 AI->setName(Arg->getName() + ".coerce" + Twine(i));
3289 }
3290
3291 if (SrcSize > DstSize) {
3293 }
3294 }
3295 } else {
3296
3297 assert(NumIRArgs == 1);
3298 auto AI = Fn->getArg(FirstIRArg);
3299 AI->setName(Arg->getName() + ".coerce");
3301 AI, Ptr,
3302 llvm::TypeSize::getFixed(
3303 getContext().getTypeSizeInChars(Ty).getQuantity() -
3305 false);
3306 }
3307
3308
3310 llvm::Value *V =
3312 if (isPromoted)
3315 } else {
3317 }
3318 break;
3319 }
3320
3322
3325
3328 auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
3329
3331
3332 unsigned argIndex = FirstIRArg;
3333 unsigned unpaddedIndex = 0;
3334 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3335 llvm::Type *eltType = coercionType->getElementType(i);
3337 continue;
3338
3340 llvm::Value *elt = Fn->getArg(argIndex++);
3341
3342 auto paramType = unpaddedStruct
3343 ? unpaddedStruct->getElementType(unpaddedIndex++)
3344 : unpaddedCoercionType;
3345
3346 if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(eltType)) {
3347 if (auto *VecTyFrom = dyn_cast<llvm::ScalableVectorType>(paramType)) {
3348 bool Extracted;
3350 *this, VecTyTo, VecTyFrom, elt, elt->getName());
3351 assert(Extracted && "Unexpected scalable to fixed vector coercion");
3352 }
3353 }
3355 }
3356 assert(argIndex == FirstIRArg + NumIRArgs);
3357 break;
3358 }
3359
3361
3362
3363
3367
3368 auto FnArgIter = Fn->arg_begin() + FirstIRArg;
3369 ExpandTypeFromArgs(Ty, LV, FnArgIter);
3370 assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
3371 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
3372 auto AI = Fn->getArg(FirstIRArg + i);
3373 AI->setName(Arg->getName() + "." + Twine(i));
3374 }
3375 break;
3376 }
3377
3379 assert(NumIRArgs == 0);
3380
3383 } else {
3386 }
3387 break;
3388 }
3389 }
3390
3391 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3392 for (int I = Args.size() - 1; I >= 0; --I)
3394 } else {
3395 for (unsigned I = 0, E = Args.size(); I != E; ++I)
3397 }
3398}
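For orientation, a hedged source-level sketch (not part of CGCall.cpp; the function and parameter names are invented) of the parameter annotations that the prolog code above lowers into IR parameter attributes and alignment assumptions:

// Hypothetical declarations; align_value and nonnull are real Clang attributes.
void consume(double *buf __attribute__((align_value(64))),  // lowered to an alignment assumption on the incoming argument
             int *flag __attribute__((nonnull)));           // lowered to a nonnull parameter attribute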
3399
3401 while (insn->use_empty()) {
3402 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
3403 if (!bitcast) return;
3404
3405
3406 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
3407 bitcast->eraseFromParent();
3408 }
3409}
3410
3411
3413 llvm::Value *result) {
3414
3415 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
3416 if (BB->empty()) return nullptr;
3417 if (&BB->back() != result) return nullptr;
3418
3419 llvm::Type *resultType = result->getType();
3420
3421
3422 llvm::Instruction *generator = cast<llvm::Instruction>(result);
3423
3425
3426
3427
3428 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
3429
3430
3431 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
3432
3433
3434 if (generator->getNextNode() != bitcast)
3435 return nullptr;
3436
3437 InstsToKill.push_back(bitcast);
3438 }
3439
3440
3441
3442
3443
3444 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
3445 if (!call) return nullptr;
3446
3447 bool doRetainAutorelease;
3448
3450 doRetainAutorelease = true;
3451 } else if (call->getCalledOperand() ==
3453 doRetainAutorelease = false;
3454
3455
3456
3457
3458
3459
3461 llvm::Instruction *prev = call->getPrevNode();
3462 assert(prev);
3463 if (isa<llvm::BitCastInst>(prev)) {
3464 prev = prev->getPrevNode();
3465 assert(prev);
3466 }
3467 assert(isa<llvm::CallInst>(prev));
3468 assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
3470 InstsToKill.push_back(prev);
3471 }
3472 } else {
3473 return nullptr;
3474 }
3475
3476 result = call->getArgOperand(0);
3477 InstsToKill.push_back(call);
3478
3479
3480
3481 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
3482 if (!bitcast->hasOneUse()) break;
3483 InstsToKill.push_back(bitcast);
3484 result = bitcast->getOperand(0);
3485 }
3486
3487
3488 for (auto *I : InstsToKill)
3489 I->eraseFromParent();
3490
3491
3492 if (doRetainAutorelease)
3494
3495
3496 return CGF.Builder.CreateBitCast(result, resultType);
3497}
3498
3499
3501 llvm::Value *result) {
3502
3504 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
3505 if (!method) return nullptr;
3508
3509
3510
3511 llvm::CallInst *retainCall = dyn_cast<llvm::CallInst>(result);
3512 if (!retainCall || retainCall->getCalledOperand() !=
3514 return nullptr;
3515
3516
3517 llvm::Value *retainedValue = retainCall->getArgOperand(0);
3518 llvm::LoadInst *load =
3519 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
3520 if (!load || load->isAtomic() || load->isVolatile() ||
3522 return nullptr;
3523
3524
3525
3526
3527 llvm::Type *resultType = result->getType();
3529 assert(retainCall->use_empty());
3530 retainCall->eraseFromParent();
3532
3533 return CGF.Builder.CreateBitCast(load, resultType);
3534}
3535
3536
3537
3538
3540 llvm::Value *result) {
3541
3542
3543
3544
3546 return self;
3547
3548
3551 return fused;
3552
3554}
3555
3556
3559
3560
3561
3562
3563 auto GetStoreIfValid = [&CGF,
3564 ReturnValuePtr](llvm::User *U) -> llvm::StoreInst * {
3565 auto *SI = dyn_cast<llvm::StoreInst>(U);
3566 if (!SI || SI->getPointerOperand() != ReturnValuePtr ||
3568 return nullptr;
3569
3570
3571
3572 assert(!SI->isAtomic() &&
3574 return SI;
3575 };
3576
3577
3578
3579
3580 if (!ReturnValuePtr->hasOneUse()) {
3581 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3582 if (IP->empty()) return nullptr;
3583
3584
3585
3586 for (llvm::Instruction &I : make_range(IP->rbegin(), IP->rend())) {
3587 if (isa<llvm::BitCastInst>(&I))
3588 continue;
3589 if (auto *II = dyn_cast<llvm::IntrinsicInst>(&I))
3590 if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)
3591 continue;
3592
3593 return GetStoreIfValid(&I);
3594 }
3595 return nullptr;
3596 }
3597
3598 llvm::StoreInst *store = GetStoreIfValid(ReturnValuePtr->user_back());
3599 if (!store) return nullptr;
3600
3601
3602
3603 llvm::BasicBlock *StoreBB = store->getParent();
3604 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3606 while (IP != StoreBB) {
3607 if (!SeenBBs.insert(IP).second || !(IP = IP->getSinglePredecessor()))
3608 return nullptr;
3609 }
3610
3611
3612
3613 return store;
3614}
3615
3616
3617
3618
3619
3620
3621
3623 int BitWidth, int CharWidth) {
3624 assert(CharWidth <= 64);
3625 assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
3626
3627 int Pos = 0;
3628 if (BitOffset >= CharWidth) {
3629 Pos += BitOffset / CharWidth;
3630 BitOffset = BitOffset % CharWidth;
3631 }
3632
3633 const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
3634 if (BitOffset + BitWidth >= CharWidth) {
3635 Bits[Pos++] |= (Used << BitOffset) & Used;
3636 BitWidth -= CharWidth - BitOffset;
3637 BitOffset = 0;
3638 }
3639
3640 while (BitWidth >= CharWidth) {
3641 Bits[Pos++] = Used;
3642 BitWidth -= CharWidth;
3643 }
3644
3645 if (BitWidth > 0)
3646 Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
3647}
3648
3649
3650
3651
3652
3654 int StorageSize, int BitOffset, int BitWidth,
3655 int CharWidth, bool BigEndian) {
3656
3658 setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
3659
3660 if (BigEndian)
3661 std::reverse(TmpBits.begin(), TmpBits.end());
3662
3663 for (uint64_t V : TmpBits)
3664 Bits[StorageOffset++] |= V;
3665}
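A minimal standalone sketch of the bit-range marking above, using std::vector instead of SmallVectorImpl; the helper name is invented, but the algorithm mirrors the listing:

#include <cstdint>
#include <cstdio>
#include <vector>

// Mark BitWidth bits starting at BitOffset in a per-char mask vector, where
// each element holds CharWidth significant bits.
static void markBitRange(std::vector<uint64_t> &Bits, int BitOffset,
                         int BitWidth, int CharWidth) {
  int Pos = BitOffset / CharWidth;          // skip untouched leading chars
  BitOffset %= CharWidth;
  const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
  if (BitOffset + BitWidth >= CharWidth) {  // partially covered leading char
    Bits[Pos++] |= (Used << BitOffset) & Used;
    BitWidth -= CharWidth - BitOffset;
    BitOffset = 0;
  }
  while (BitWidth >= CharWidth) {           // fully covered chars
    Bits[Pos++] = Used;
    BitWidth -= CharWidth;
  }
  if (BitWidth > 0)                         // partially covered trailing char
    Bits[Pos] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
}

int main() {
  std::vector<uint64_t> Bits(4, 0);
  markBitRange(Bits, /*BitOffset=*/5, /*BitWidth=*/12, /*CharWidth=*/8);
  for (uint64_t B : Bits)
    std::printf("%02llx ", (unsigned long long)B);   // prints: e0 ff 01 00
  std::printf("\n");
}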
3666
3669
3670
3671
3672
3673
3674
3682
3683 int Idx = 0;
3686
3689 continue;
3690
3695 BFI.Size, CharWidth,
3697 continue;
3698 }
3699
3701 Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits);
3702 }
3703}
3704
3705
3706
3710
3715
3717 auto Src = TmpBits.begin();
3718 auto Dst = Bits.begin() + Offset + I * Size;
3719 for (int J = 0; J < Size; ++J)
3720 *Dst++ |= *Src++;
3721 }
3722}
3723
3724
3725
3729 return setUsedBits(CGM, RTy, Offset, Bits);
3730
3733 return setUsedBits(CGM, ATy, Offset, Bits);
3734
3736 if (Size <= 0)
3737 return;
3738
3739 std::fill_n(Bits.begin() + Offset, Size,
3740 (uint64_t(1) << Context.getCharWidth()) - 1);
3741}
3742
3744 int Pos, int Size, int CharWidth,
3745 bool BigEndian) {
3746 assert(Size > 0);
3747 uint64_t Mask = 0;
3748 if (BigEndian) {
3749 for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
3750 ++P)
3751 Mask = (Mask << CharWidth) | *P;
3752 } else {
3753 auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
3754 do
3755 Mask = (Mask << CharWidth) | *--P;
3756 while (P != End);
3757 }
3758 return Mask;
3759}
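A standalone sketch of the mask-folding step above (assuming Size * CharWidth <= 64): the first char of the range becomes the least-significant chunk on little-endian targets and the most-significant chunk on big-endian targets:

#include <cstdint>
#include <cstdio>
#include <vector>

static uint64_t foldMask(const std::vector<uint64_t> &Bits, int Pos, int Size,
                         int CharWidth, bool BigEndian) {
  uint64_t Mask = 0;
  if (BigEndian) {
    for (int I = 0; I < Size; ++I)        // Bits[Pos] ends up most significant
      Mask = (Mask << CharWidth) | Bits[Pos + I];
  } else {
    for (int I = Size - 1; I >= 0; --I)   // Bits[Pos] ends up least significant
      Mask = (Mask << CharWidth) | Bits[Pos + I];
  }
  return Mask;
}

int main() {
  std::vector<uint64_t> Bits = {0xe0, 0xff, 0x01, 0x00};   // from the markBitRange example
  std::printf("%#llx\n",
              (unsigned long long)foldMask(Bits, 0, 4, 8, /*BigEndian=*/false)); // 0x1ffe0
}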
3760
3761
3762
3764 llvm::IntegerType *ITy,
3766 assert(Src->getType() == ITy);
3767 assert(ITy->getScalarSizeInBits() <= 64);
3768
3770 int Size = DataLayout.getTypeStoreSize(ITy);
3773
3776 buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian());
3777
3778 return Builder.CreateAnd(Src, Mask, "cmse.clear");
3779}
3780
3781
3782
3784 llvm::ArrayType *ATy,
3787 int Size = DataLayout.getTypeStoreSize(ATy);
3790
3791
3793 int CharsPerElt =
3794 ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
3795 int MaskIndex = 0;
3796 llvm::Value *R = llvm::PoisonValue::get(ATy);
3797 for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
3799 DataLayout.isBigEndian());
3800 MaskIndex += CharsPerElt;
3801 llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
3802 llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
3803 R = Builder.CreateInsertValue(R, T1, I);
3804 }
3805
3806 return R;
3807}
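A hedged source-level example of where this clearing matters (assumes an Armv8-M Mainline target built with -mcmse; not taken from this file): a value returned from a non-secure-callable entry function must not leak bits the type does not actually use.

// Unused bits of the returned union (everything beyond the 3-bit field) are
// masked out with the pattern computed by setUsedBits()/buildMultiCharMask().
union R { struct { unsigned a : 3; } s; };
__attribute__((cmse_nonsecure_entry)) union R get(void) {
  union R r;
  r.s.a = 5;
  return r;
}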
3808
3810 bool EmitRetDbgLoc,
3813
3815 return;
3816 }
3817
3819
3820 Builder.CreateUnreachable();
3821 return;
3822 }
3823
3824
3826 Builder.CreateRetVoid();
3827 return;
3828 }
3829
3830 llvm::DebugLoc RetDbgLoc;
3831 llvm::Value *RV = nullptr;
3834
3835 switch (RetAI.getKind()) {
3837
3838
3841 llvm::Function::arg_iterator EI = CurFn->arg_end();
3842 --EI;
3843 llvm::Value *ArgStruct = &*EI;
3846 llvm::Type *Ty =
3847 cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();
3849 }
3850 break;
3851
3853 auto AI = CurFn->arg_begin();
3855 ++AI;
3861 true);
3862 break;
3863 }
3865
3866 break;
3877 true);
3878 break;
3879 }
3880 }
3881 break;
3882 }
3883
3888
3889
3890
3891
3892
3893 if (llvm::StoreInst *SI =
3895
3896
3897
3899 RetDbgLoc = SI->getDebugLoc();
3900
3901 RV = SI->getValueOperand();
3902 SI->eraseFromParent();
3903
3904
3905 } else {
3907 }
3908 } else {
3909
3911
3913 }
3914
3915
3916
3918#ifndef NDEBUG
3919
3920
3921
3922
3924
3925 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
3926 RT = FD->getReturnType();
3927 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
3928 RT = MD->getReturnType();
3931 else
3932 llvm_unreachable("Unexpected function/method type");
3933
3934 assert(getLangOpts().ObjCAutoRefCount &&
3937#endif
3939 }
3940
3941 break;
3942
3944 break;
3945
3949 auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
3950
3951
3954 unsigned unpaddedIndex = 0;
3955 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3956 auto coercedEltType = coercionType->getElementType(i);
3958 continue;
3959
3962 eltAddr,
3963 unpaddedStruct ? unpaddedStruct->getElementType(unpaddedIndex++)
3964 : unpaddedCoercionType,
3965 *this);
3966 results.push_back(elt);
3967 }
3968
3969
3970 if (results.size() == 1) {
3971 RV = results[0];
3972
3973
3974 } else {
3975
3977
3978 RV = llvm::PoisonValue::get(returnType);
3979 for (unsigned i = 0, e = results.size(); i != e; ++i) {
3980 RV = Builder.CreateInsertValue(RV, results[i], i);
3981 }
3982 }
3983 break;
3984 }
3987 llvm_unreachable("Invalid ABI kind for return argument");
3988 }
3989
3990 llvm::Instruction *Ret;
3991 if (RV) {
3993
3994
3995
3996 auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
3997 if (ITy != nullptr && isa(RetTy.getCanonicalType()))
3999 }
4002 } else {
4004 }
4005
4006 if (RetDbgLoc)
4007 Ret->setDebugLoc(std::move(RetDbgLoc));
4008}
4009
4011
4013 return;
4014
4015
4016
4018 return;
4019
4020 ReturnsNonNullAttr *RetNNAttr = nullptr;
4021 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
4023
4024 if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
4025 return;
4026
4027
4031 if (RetNNAttr) {
4032 assert(!requiresReturnValueNullabilityCheck() &&
4033 "Cannot check nullability and the nonnull attribute");
4034 AttrLoc = RetNNAttr->getLocation();
4035 CheckKind = SanitizerKind::SO_ReturnsNonnullAttribute;
4036 Handler = SanitizerHandler::NonnullReturn;
4037 } else {
4038 if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
4039 if (auto *TSI = DD->getTypeSourceInfo())
4040 if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>())
4041 AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
4042 CheckKind = SanitizerKind::SO_NullabilityReturn;
4043 Handler = SanitizerHandler::NullabilityReturn;
4044 }
4045
4046 SanitizerScope SanScope(this);
4047
4048
4049
4051 llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
4052 llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
4053 llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
4054 if (requiresReturnValueNullabilityCheck())
4055 CanNullCheck =
4056 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
4057 Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
4059
4060
4061 llvm::Value *Cond = Builder.CreateIsNotNull(RV);
4063 llvm::Value *DynamicData[] = {SLocPtr};
4064 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
4065
4067
4068#ifndef NDEBUG
4069
4071#endif
4072}
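A hedged illustration (not from this file) of what the check above guards when compiling with -fsanitize=nullability-return:

int *_Nonnull pick(int *p) {
  return p;   // if p is null when this returns, the NullabilityReturn handler is invoked
}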
4073
4077}
4078
4081
4082
4084 llvm::Type *IRPtrTy = llvm::PointerType::getUnqual(CGF.getLLVMContext());
4085 llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy);
4086
4087
4088
4091
4098}
4099
4103
4104
4105
4107
4109
4110
4111
4112 if (type->isReferenceType()) {
4114
4115
4116
4117
4118
4119
4120 } else if (getLangOpts().ObjCAutoRefCount &&
4121 param->hasAttr() &&
4122 type->isObjCRetainableType()) {
4124 auto null =
4125 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
4128
4129
4130
4131 } else {
4133 }
4134
4135
4140 CalleeDestructedParamCleanups.lookup(cast(param));
4141 assert(cleanup.isValid() &&
4142 "cleanup for callee-destructed param not recorded");
4143
4144 llvm::Instruction *isActive = Builder.CreateUnreachable();
4146 }
4147}
4148
4150 return llvm::isa_and_nonnull<llvm::ConstantPointerNull>(addr);
4151}
4152
4155}
4156
4157
4163 "shouldn't have writeback for provably null argument");
4164
4167
4171 return;
4172 }
4173
4174 llvm::BasicBlock *contBB = nullptr;
4175
4176
4177
4179
4180 if (!provablyNonNull) {
4181 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
4183
4185 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
4187 }
4188
4189
4191
4192
4194 "icr.writeback-cast");
4195
4196
4197
4198
4199
4200
4201
4202
4203 if (writeback.ToUse) {
4205
4206
4207
4209
4210
4212
4213
4215
4216
4218
4219
4221
4222
4223 } else {
4225 }
4226
4227
4228 if (!provablyNonNull)
4230}
4231
4236
4237 for (const auto &I : llvm::reverse(Cleanups)) {
4239 I.IsActiveIP->eraseFromParent();
4240 }
4241}
4242
4245 if (uop->getOpcode() == UO_AddrOf)
4246 return uop->getSubExpr();
4247 return nullptr;
4248}
4249
4250
4251
4252
4253
4257
4258
4259
4262
4263
4264 } else {
4266
4270 }
4272
4273
4274
4275
4276 llvm::PointerType *destType =
4278 llvm::Type *destElemType =
4280
4281
4283 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
4285 return;
4286 }
4287
4288
4291
4292
4293
4294
4295 CodeGenFunction::ConditionalEvaluation condEval(CGF);
4296
4297
4298 bool shouldCopy = CRE->shouldCopy();
4299 if (!shouldCopy) {
4300 llvm::Value *null =
4301 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destElemType));
4303 }
4304
4305 llvm::BasicBlock *contBB = nullptr;
4306 llvm::BasicBlock *originBB = nullptr;
4307
4308
4309 llvm::Value *finalArgument;
4310
4312
4313 if (provablyNonNull) {
4315 } else {
4317
4318 finalArgument = CGF.Builder.CreateSelect(
4319 isNull, llvm::ConstantPointerNull::get(destType),
4321
4322
4323
4324 if (shouldCopy) {
4325 originBB = CGF.Builder.GetInsertBlock();
4327 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
4328 CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
4330 condEval.begin(CGF);
4331 }
4332 }
4333
4334 llvm::Value *valueToUse = nullptr;
4335
4336
4337 if (shouldCopy) {
4340
4342 src = CGF.Builder.CreateBitCast(src, destElemType, "icr.cast");
4343
4344
4346
4347
4348
4349
4350
4351
4354 valueToUse = src;
4355 }
4356 }
4357
4358
4359 if (shouldCopy && !provablyNonNull) {
4360 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
4362
4363
4364 if (valueToUse) {
4365 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
4366 "icr.to-use");
4367 phiToUse->addIncoming(valueToUse, copyBB);
4368 phiToUse->addIncoming(llvm::PoisonValue::get(valueToUse->getType()),
4369 originBB);
4370 valueToUse = phiToUse;
4371 }
4372
4373 condEval.end(CGF);
4374 }
4375
4378}
4379
4381 assert(!StackBase);
4382
4383
4384 StackBase = CGF.Builder.CreateStackSave("inalloca.save");
4385}
4386
4388 if (StackBase) {
4389
4390 CGF.Builder.CreateStackRestore(StackBase);
4391 }
4392}
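A hedged illustration (not from this file) of when this stacksave/stackrestore pairing comes into play: on i686-pc-windows-msvc, a by-value argument with non-trivial copy or destroy semantics is passed in the inalloca argument block that these two helpers bracket.

struct S { S(); S(const S &); ~S(); int x; };
void callee(S s);
void caller() { callee(S()); }   // the temporary for 's' lives in the inalloca block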
4393
4396 AbstractCallee AC,
4397 unsigned ParmNum) {
4398 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
4399 SanOpts.has(SanitizerKind::NullabilityArg)))
4400 return;
4401
4402
4403 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
4404 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
4405
4406
4407 const NonNullAttr *NNAttr = nullptr;
4408 if (SanOpts.has(SanitizerKind::NonnullAttribute))
4409 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
4410
4411 bool CanCheckNullability = false;
4412 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD &&
4413 !PVD->getType()->isRecordType()) {
4414 auto Nullability = PVD->getType()->getNullability();
4415 CanCheckNullability = Nullability &&
4417 PVD->getTypeSourceInfo();
4418 }
4419
4420 if (!NNAttr && !CanCheckNullability)
4421 return;
4422
4426 if (NNAttr) {
4427 AttrLoc = NNAttr->getLocation();
4428 CheckKind = SanitizerKind::SO_NonnullAttribute;
4429 Handler = SanitizerHandler::NonnullArg;
4430 } else {
4431 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
4432 CheckKind = SanitizerKind::SO_NullabilityArg;
4433 Handler = SanitizerHandler::NullabilityArg;
4434 }
4435
4436 SanitizerScope SanScope(this);
4438 llvm::Constant *StaticData[] = {
4440 llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
4441 };
4442 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, {});
4443}
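A hedged illustration (not from this file) of the argument-side check under -fsanitize=nonnull-attribute:

__attribute__((nonnull(1))) void use(int *p);
void call(int *maybe_null) {
  use(maybe_null);   // the emitted check verifies maybe_null is non-null before the call
}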
4444
4447 AbstractCallee AC, unsigned ParmNum) {
4448 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
4449 SanOpts.has(SanitizerKind::NullabilityArg)))
4450 return;
4451
4453}
4454
4455
4456
4457
4460
4461
4462
4463
4465 return false;
4467 return false;
4468 return llvm::any_of(ArgTypes, [&](QualType Ty) {
4470 });
4471}
4472
4473#ifndef NDEBUG
4474
4475
4478 if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) {
4479 return classDecl->getTypeParamListAsWritten();
4480 }
4481
4482 if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) {
4483 return catDecl->getTypeParamList();
4484 }
4485
4486 return false;
4487}
4488#endif
4489
4490
4493 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4494 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
4496
4497 assert((ParamsToSkip == 0 || Prototype.P) &&
4498 "Can't skip parameters if type info is not provided");
4499
4500
4501
4502
4503
4504
4506
4507
4508 bool IsVariadic = false;
4510 const auto *MD = dyn_cast<const ObjCMethodDecl *>(Prototype.P);
4511 if (MD) {
4512 IsVariadic = MD->isVariadic();
4515 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
4516 MD->param_type_end());
4517 } else {
4518 const auto *FPT = cast<const FunctionProtoType *>(Prototype.P);
4519 IsVariadic = FPT->isVariadic();
4520 ExplicitCC = FPT->getExtInfo().getCC();
4521 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
4522 FPT->param_type_end());
4523 }
4524
4525#ifndef NDEBUG
4526
4529 for (QualType Ty : ArgTypes) {
4530 assert(Arg != ArgRange.end() && "Running over edge of argument list!");
4531 assert(
4532 (isGenericMethod || Ty->isVariablyModifiedType() ||
4533 Ty.getNonReferenceType()->isObjCRetainableType() ||
4535 .getCanonicalType(Ty.getNonReferenceType())
4536 .getTypePtr() ==
4537 getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
4538 "type mismatch in call argument!");
4539 ++Arg;
4540 }
4541
4542
4543
4544 assert((Arg == ArgRange.end() || IsVariadic) &&
4545 "Extra arguments in non-variadic function!");
4546#endif
4547 }
4548
4549
4550 for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
4551 ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
4552 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
4553
4554
4555
4556
4557
4558
4559 bool LeftToRight =
4563
4564 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
4565 RValue EmittedArg) {
4566 if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
4567 return;
4568 auto *PS = AC.getParamDecl(I)->getAttr();
4569 if (PS == nullptr)
4570 return;
4571
4575 assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
4576 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
4577 EmittedArg.getScalarVal(),
4578 PS->isDynamic());
4580
4581
4582 if (!LeftToRight)
4583 std::swap(Args.back(), *(&Args.back() - 1));
4584 };
4585
4586
4588 assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
4589 "inalloca only supported on x86");
4591 }
4592
4593
4594 size_t CallArgsStart = Args.size();
4595 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
4596 unsigned Idx = LeftToRight ? I : E - I - 1;
4598 unsigned InitialArgSize = Args.size();
4599
4600
4601 assert((!isa(*Arg) ||
4602 getContext().hasSameUnqualifiedType((*Arg)->getType(),
4603 ArgTypes[Idx]) ||
4604 (isa(AC.getDecl()) &&
4606 "Argument and parameter types don't match");
4608
4609
4610 assert(InitialArgSize + 1 == Args.size() &&
4611 "The code below depends on only adding one arg per EmitCallArg");
4612 (void)InitialArgSize;
4613
4614
4615 if (!Args.back().hasLValue()) {
4616 RValue RVArg = Args.back().getKnownRValue();
4618 ParamsToSkip + Idx);
4619
4620
4621
4622 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
4623 }
4624 }
4625
4626 if (!LeftToRight) {
4627
4628
4629 std::reverse(Args.begin() + CallArgsStart, Args.end());
4630
4631
4633 }
4634}
4635
4636namespace {
4637
4640 : Addr(Addr), Ty(Ty) {}
4641
4644
4651 false, Addr, Ty);
4652 } else {
4654 }
4655 }
4656};
4657
4658struct DisableDebugLocationUpdates {
4660 bool disabledDebugInfo;
4661 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
4662 if ((disabledDebugInfo = isa(E) && CGF.getDebugInfo()))
4664 }
4665 ~DisableDebugLocationUpdates() {
4666 if (disabledDebugInfo)
4668 }
4669};
4670
4671}
4672
4674 if (!HasLV)
4675 return RV;
4679 IsUsed = true;
4681}
4682
4689 else {
4692
4696 }
4697 IsUsed = true;
4698}
4699
4701 for (const auto &I : args.writebacks())
4703}
4704
4707 DisableDebugLocationUpdates Dis(*this, E);
4709 = dyn_cast(E)) {
4712 }
4713
4714
4715
4716
4717 if (const HLSLOutArgExpr *OE = dyn_cast<HLSLOutArgExpr>(E)) {
4719 return;
4720 }
4721
4723 "reference binding to unmaterialized r-value!");
4724
4728 }
4729
4731
4732
4733
4734
4735 if (type->isRecordType() &&
4737
4738
4741
4742 bool DestroyedInCallee = true, NeedsCleanup = true;
4743 if (const auto *RD = type->getAsCXXRecordDecl())
4744 DestroyedInCallee = RD->hasNonTrivialDestructor();
4745 else
4746 NeedsCleanup = type.isDestructedType();
4747
4748 if (DestroyedInCallee)
4750
4754
4755 if (DestroyedInCallee && NeedsCleanup) {
4756
4757
4758
4761
4762 llvm::Instruction *IsActive =
4765 }
4766 return;
4767 }
4768
4769 if (HasAggregateEvalKind && isa(E) &&
4770 cast(E)->getCastKind() == CK_LValueToRValue &&
4771 ->isArrayParameterType()) {
4775 return;
4776 }
4777
4779}
4780
4781QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
4782
4783
4784
4785 if (!getTarget().getTriple().isOSWindows())
4787
4794 }
4795
4797}
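A hedged illustration (not from this file): Win64 system headers define NULL as a plain 0, so a null-pointer constant passed to a varargs function would otherwise occupy only a 32-bit slot; getVarArgType widens such arguments to a pointer-sized integer.

#include <stdio.h>
void demo(void) {
  printf("%p\n", NULL);   // the null constant is widened so the callee reads a full pointer-sized slot
}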
4798
4799
4800
4801void
4802CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
4805 Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
4807}
4808
4809
4810llvm::CallInst *
4812 const llvm::Twine &name) {
4814}
4815
4816
4817llvm::CallInst *
4820 const llvm::Twine &name) {
4822 for (auto arg : args)
4823 values.push_back(arg.emitRawPointer(*this));
4825}
4826
4827llvm::CallInst *
4830 const llvm::Twine &name) {
4831 llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
4832 call->setDoesNotThrow();
4833 return call;
4834}
4835
4836
4837
4839 const llvm::Twine &name) {
4841}
4842
4843
4844
4847
4848
4851
4852
4853
4854 if (auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) {
4855 if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) {
4856 auto IID = CalleeFn->getIntrinsicID();
4857 if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID))
4859 }
4860 }
4861
4864 return BundleList;
4865}
4866
4867
4870 const llvm::Twine &name) {
4871 llvm::CallInst *call = Builder.CreateCall(
4874
4876 return cast<llvm::CallInst>(addConvergenceControlToken(call));
4877 return call;
4878}
4879
4880
4885
4887 llvm::InvokeInst *invoke =
4888 Builder.CreateInvoke(callee,
4891 args,
4892 BundleList);
4893 invoke->setDoesNotReturn();
4895 } else {
4896 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
4897 call->setDoesNotReturn();
4899 Builder.CreateUnreachable();
4900 }
4901}
4902
4903
4904llvm::CallBase *
4906 const Twine &name) {
4908}
4909
4910
4911llvm::CallBase *
4914 const Twine &name) {
4915 llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
4917 return call;
4918}
4919
4920
4921
4924 const Twine &Name) {
4928
4929 llvm::CallBase *Inst;
4930 if (!InvokeDest)
4931 Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
4932 else {
4934 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
4935 Name);
4937 }
4938
4939
4940
4942 AddObjCARCExceptionMetadata(Inst);
4943
4944 return Inst;
4945}
4946
4947void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
4948 llvm::Value *New) {
4949 DeferredReplacements.push_back(
4950 std::make_pair(llvm::WeakTrackingVH(Old), New));
4951}
4952
4953namespace {
4954
4955
4956
4957[[nodiscard]] llvm::AttributeList
4958maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
4959 const llvm::AttributeList &Attrs,
4960 llvm::Align NewAlign) {
4961 llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
4962 if (CurAlign >= NewAlign)
4963 return Attrs;
4964 llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
4965 return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
4966 .addRetAttribute(Ctx, AlignAttr);
4967}
4968
4969 template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
4970protected:
4972
4973
4974 const AlignedAttrTy *AA = nullptr;
4975
4976 llvm::Value *Alignment = nullptr;
4977 llvm::ConstantInt *OffsetCI = nullptr;
4978
4979 AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4980 : CGF(CGF_) {
4981 if (!FuncDecl)
4982 return;
4983 AA = FuncDecl->getAttr<AlignedAttrTy>();
4984 }
4985
4986public:
4987
4988 [[nodiscard]] llvm::AttributeList
4989 TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
4990 if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
4991 return Attrs;
4992 const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
4993 if (!AlignmentCI)
4994 return Attrs;
4995
4996
4997 if (!AlignmentCI->getValue().isPowerOf2())
4998 return Attrs;
4999 llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
5001 llvm::Align(
5002 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
5003 AA = nullptr;
5004 return NewAttrs;
5005 }
5006
5007
5008
5009
5011 if (!AA)
5012 return;
5014 AA->getLocation(), Alignment, OffsetCI);
5015 AA = nullptr;
5016 }
5017};
5018
5019
5020class AssumeAlignedAttrEmitter final
5021 : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
5022public:
5024 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
5025 if (!AA)
5026 return;
5027
5028 Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
5029 if (Expr *Offset = AA->getOffset()) {
5030 OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset));
5031 if (OffsetCI->isNullValue())
5032 OffsetCI = nullptr;
5033 }
5034 }
5035};
5036
5037
5038class AllocAlignAttrEmitter final
5039 : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
5040public:
5043 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
5044 if (!AA)
5045 return;
5046
5047 Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
5048 .getRValue(CGF)
5049 .getScalarVal();
5050 }
5051};
5052
5053}
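A hedged source-level sketch (not from this file; the names are invented) of the two attributes that the emitter classes above handle at call sites:

__attribute__((assume_aligned(64))) void *get_block(void);          // handled by AssumeAlignedAttrEmitter
__attribute__((alloc_align(1)))     void *my_alloc(int alignment);  // handled by AllocAlignAttrEmitter (1-based parameter index)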
5054
5056 if (auto *VT = dyn_cast<llvm::VectorType>(Ty))
5057 return VT->getPrimitiveSizeInBits().getKnownMinValue();
5058 if (auto *AT = dyn_cast<llvm::ArrayType>(Ty))
5060
5061 unsigned MaxVectorWidth = 0;
5062 if (auto *ST = dyn_cast<llvm::StructType>(Ty))
5063 for (auto *I : ST->elements())
5064 MaxVectorWidth = std::max(MaxVectorWidth, getMaxVectorWidth(I));
5065 return MaxVectorWidth;
5066}
5067
5072 llvm::CallBase **callOrInvoke, bool IsMustTail,
5074 bool IsVirtualFunctionPointerThunk) {
5075
5076
5077 assert(Callee.isOrdinary() || Callee.isVirtual());
5078
5079
5080
5083
5085
5086 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
5087 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
5088
5089
5090
5091
5092
5093
5094 if (TargetDecl->hasAttr() &&
5095 (TargetDecl->hasAttr() ||
5098 }
5099
5100
5101
5103 const FunctionDecl *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl);
5105 CalleeDecl, CallArgs, RetTy);
5106
5107
5108
5109
5110
5112 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
5114 llvm::Instruction *IP = CallArgs.getStackBase();
5115 llvm::AllocaInst *AI;
5116 if (IP) {
5117 IP = IP->getNextNode();
5118 AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(), "argmem",
5119 IP->getIterator());
5120 } else {
5122 }
5124 AI->setAlignment(Align.getAsAlign());
5125 AI->setUsedWithInAlloca(true);
5126 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
5127 ArgMemory = RawAddress(AI, ArgStruct, Align);
5128 }
5129
5130 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
5132
5133
5134
5137 llvm::Value *UnusedReturnSizePtr = nullptr;
5139
5140
5141
5142
5143 if ((IsVirtualFunctionPointerThunk || IsMustTail) && RetAI.isIndirect()) {
5145 IRFunctionArgs.getSRetArgNo(),
5149 } else {
5150 SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
5152 llvm::TypeSize size =
5155 }
5156 }
5157 if (IRFunctionArgs.hasSRetArg()) {
5158 IRCallArgs[IRFunctionArgs.getSRetArgNo()] =
5164 }
5165 }
5166
5169
5170
5171
5172
5174
5175
5176 assert(CallInfo.arg_size() == CallArgs.size() &&
5177 "Mismatch between function signature & arguments.");
5178 unsigned ArgNo = 0;
5180 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
5181 I != E; ++I, ++info_it, ++ArgNo) {
5183
5184
5185 if (IRFunctionArgs.hasPaddingArg(ArgNo))
5186 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
5188
5189 unsigned FirstIRArg, NumIRArgs;
5190 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
5191
5192 bool ArgHasMaybeUndefAttr =
5194
5195 switch (ArgInfo.getKind()) {
5197 assert(NumIRArgs == 0);
5198 assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
5199 if (I->isAggregate()) {
5201 ? I->getKnownLValue().getAddress()
5202 : I->getKnownRValue().getAggregateAddress();
5203 llvm::Instruction *Placeholder =
5204 cast<llvm::Instruction>(Addr.getPointer());
5205
5207
5208 CGBuilderTy::InsertPoint IP = Builder.saveIP();
5209 Builder.SetInsertPoint(Placeholder);
5213 } else {
5214
5215
5216
5221 }
5222 deferPlaceholderReplacement(Placeholder, Addr.getPointer());
5224
5225
5227 I->Ty, getContext().getTypeAlignInChars(I->Ty),
5228 "indirect-arg-temp");
5229 I->copyInto(*this, Addr);
5233 } else {
5234
5238 I->copyInto(*this, Addr);
5239 }
5240 break;
5241 }
5242
5245 assert(NumIRArgs == 1);
5246 if (I->isAggregate()) {
5247
5248
5249
5250
5251
5252
5253
5254
5255 Address Addr = I->hasLValue()
5256 ? I->getKnownLValue().getAddress()
5257 : I->getKnownRValue().getAggregateAddress();
5260
5261 assert((FirstIRArg >= IRFuncTy->getNumParams() ||
5262 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
5263 TD->getAllocaAddrSpace()) &&
5264 "indirect argument must be in alloca address space");
5265
5266 bool NeedCopy = false;
5268 llvm::getOrEnforceKnownAlignment(Addr.emitRawPointer(*this),
5271 NeedCopy = true;
5272 } else if (I->hasLValue()) {
5273 auto LV = I->getKnownLValue();
5275
5276 bool isByValOrRef =
5278
5279 if (!isByValOrRef ||
5281 NeedCopy = true;
5282 }
5284 if ((isByValOrRef &&
5287 NeedCopy = true;
5288 }
5289 }
5290
5291
5292 else if ((isByValOrRef &&
5293 Addr.getType()->getAddressSpace() != IRFuncTy->
5294 getParamType(FirstIRArg)->getPointerAddressSpace())) {
5295 NeedCopy = true;
5296 }
5297 }
5298
5299 if (!NeedCopy) {
5300
5302 auto *T = llvm::PointerType::get(
5304
5307 true);
5308 if (ArgHasMaybeUndefAttr)
5309 Val = Builder.CreateFreeze(Val);
5310 IRCallArgs[FirstIRArg] = Val;
5311 break;
5312 }
5313 } else if (I->getType()->isArrayParameterType()) {
5314
5315
5316
5317
5318
5319 IRCallArgs[FirstIRArg] = I->getKnownRValue().getScalarVal();
5320 break;
5321 }
5322
5323
5324
5328 if (ArgHasMaybeUndefAttr)
5329 Val = Builder.CreateFreeze(Val);
5330 IRCallArgs[FirstIRArg] = Val;
5331
5332
5333 llvm::TypeSize ByvalTempElementSize =
5335 llvm::Value *LifetimeSize =
5337
5338
5339 if (LifetimeSize)
5340 CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
5341
5342
5343 I->copyInto(*this, AI);
5344 break;
5345 }
5346
5348 assert(NumIRArgs == 0);
5349 break;
5350
5353 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
5356 assert(NumIRArgs == 1);
5357 llvm::Value *V;
5358 if (!I->isAggregate())
5359 V = I->getKnownRValue().getScalarVal();
5360 else
5362 I->hasLValue() ? I->getKnownLValue().getAddress()
5363 : I->getKnownRValue().getAggregateAddress());
5364
5365
5366
5369 assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
5370
5373 V, pointeeTy, getContext().getTypeAlignInChars(pointeeTy));
5374
5375 swiftErrorTemp =
5378 cast<llvm::AllocaInst>(V)->setSwiftError(true);
5379
5382 }
5383
5384
5386 V->getType()->isIntegerTy())
5388
5389
5390
5391 if (FirstIRArg < IRFuncTy->getNumParams() &&
5392 V->getType() != IRFuncTy->getParamType(FirstIRArg))
5393 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
5394
5395 if (ArgHasMaybeUndefAttr)
5397 IRCallArgs[FirstIRArg] = V;
5398 break;
5399 }
5400
5401 llvm::StructType *STy =
5403
5404
5406 if (!I->isAggregate()) {
5408 I->copyInto(*this, Src);
5409 } else {
5410 Src = I->hasLValue() ? I->getKnownLValue().getAddress()
5411 : I->getKnownRValue().getAggregateAddress();
5412 }
5413
5414
5416
5417
5418
5421 llvm::TypeSize SrcTypeSize =
5423 llvm::TypeSize DstTypeSize = CGM.getDataLayout().getTypeAllocSize(STy);
5424 if (SrcTypeSize.isScalable()) {
5425 assert(STy->containsHomogeneousScalableVectorTypes() &&
5426 "ABI only supports structure with homogeneous scalable vector "
5427 "type");
5428 assert(SrcTypeSize == DstTypeSize &&
5429 "Only allow non-fractional movement of structure with "
5430 "homogeneous scalable vector type");
5431 assert(NumIRArgs == STy->getNumElements());
5432
5433 llvm::Value *StoredStructValue =
5435 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5436 llvm::Value *Extract = Builder.CreateExtractValue(
5437 StoredStructValue, i, Src.getName() + ".extract" + Twine(i));
5438 IRCallArgs[FirstIRArg + i] = Extract;
5439 }
5440 } else {
5441 uint64_t SrcSize = SrcTypeSize.getFixedValue();
5442 uint64_t DstSize = DstTypeSize.getFixedValue();
5443
5444
5445
5446
5447
5448 if (SrcSize < DstSize) {
5450 Src.getName() + ".coerce");
5452 Src = TempAlloca;
5453 } else {
5455 }
5456
5457 assert(NumIRArgs == STy->getNumElements());
5458 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5461 if (ArgHasMaybeUndefAttr)
5462 LI = Builder.CreateFreeze(LI);
5463 IRCallArgs[FirstIRArg + i] = LI;
5464 }
5465 }
5466 } else {
5467
5468 assert(NumIRArgs == 1);
5469 llvm::Value *Load =
5471
5473
5474
5475
5476 auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
5477 if (ATy != nullptr && isa(I->Ty.getCanonicalType()))
5479 }
5480
5481 if (ArgHasMaybeUndefAttr)
5483 IRCallArgs[FirstIRArg] = Load;
5484 }
5485
5486 break;
5487 }
5488
5491 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
5493 auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
5494
5495 llvm::Value *tempSize = nullptr;
5498 if (I->isAggregate()) {
5499 addr = I->hasLValue() ? I->getKnownLValue().getAddress()
5500 : I->getKnownRValue().getAggregateAddress();
5501
5502 } else {
5503 RValue RV = I->getKnownRValue();
5504 assert(RV.isScalar());
5505
5506 llvm::Type *scalarType = RV.getScalarVal()->getType();
5507 auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
5508 auto scalarAlign = CGM.getDataLayout().getPrefTypeAlign(scalarType);
5509
5510
5514 "tmp",
5515 nullptr, &AllocaAddr);
5517
5519 }
5520
5522
5523 unsigned IRArgPos = FirstIRArg;
5524 unsigned unpaddedIndex = 0;
5525 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5526 llvm::Type *eltType = coercionType->getElementType(i);
5530 eltAddr,
5531 unpaddedStruct ? unpaddedStruct->getElementType(unpaddedIndex++)
5532 : unpaddedCoercionType,
5533 *this);
5534 if (ArgHasMaybeUndefAttr)
5535 elt = Builder.CreateFreeze(elt);
5536 IRCallArgs[IRArgPos++] = elt;
5537 }
5538 assert(IRArgPos == FirstIRArg + NumIRArgs);
5539
5540 if (tempSize) {
5542 }
5543
5544 break;
5545 }
5546
5548 unsigned IRArgPos = FirstIRArg;
5549 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
5550 assert(IRArgPos == FirstIRArg + NumIRArgs);
5551 break;
5552 }
5553 }
5554 }
5555
5556 const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
5558
5559
5560 if (ArgMemory.isValid()) {
5561 llvm::Value *Arg = ArgMemory.getPointer();
5562 assert(IRFunctionArgs.hasInallocaArg());
5563 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
5564 }
5565
5566
5567
5568
5569
5570
5571
5572
5573
5574 auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
5575 llvm::Value *Ptr) -> llvm::Function * {
5576 if (!CalleeFT->isVarArg())
5577 return nullptr;
5578
5579
5580 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
5581 if (CE->getOpcode() == llvm::Instruction::BitCast)
5582 Ptr = CE->getOperand(0);
5583 }
5584
5585 llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
5586 if (!OrigFn)
5587 return nullptr;
5588
5589 llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
5590
5591
5592
5593 if (OrigFT->isVarArg() ||
5594 OrigFT->getNumParams() != CalleeFT->getNumParams() ||
5595 OrigFT->getReturnType() != CalleeFT->getReturnType())
5596 return nullptr;
5597
5598 for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
5599 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
5600 return nullptr;
5601
5602 return OrigFn;
5603 };
5604
5605 if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
5606 CalleePtr = OrigFn;
5607 IRFuncTy = OrigFn->getFunctionType();
5608 }
5609
5610
5611
5612
5613
5616
5617
5618
5619
5620
5621#ifndef NDEBUG
5622 assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
5623 for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
5624
5625 if (IRFunctionArgs.hasInallocaArg() &&
5626 i == IRFunctionArgs.getInallocaArgNo())
5627 continue;
5628 if (i < IRFuncTy->getNumParams())
5629 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
5630 }
5631#endif
5632
5633
5634 for (unsigned i = 0; i < IRCallArgs.size(); ++i)
5635 LargestVectorWidth = std::max(LargestVectorWidth,
5637
5638
5640 llvm::AttributeList Attrs;
5643 true,
5644 false);
5645
5646 if (CallingConv == llvm::CallingConv::X86_VectorCall &&
5647 getTarget().getTriple().isWindowsArm64EC()) {
5648 CGM.Error(Loc, "__vectorcall calling convention is not currently "
5649 "supported");
5650 }
5651
5653 if (FD->hasAttr())
5654
5655 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);
5656
5657
5658
5659
5660 if (FD->hasAttr() && getLangOpts().FastMath)
5662 Attrs);
5663 }
5664
5666 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoMerge);
5667
5668
5670 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
5671
5672
5673
5676 CallerDecl, CalleeDecl))
5677 Attrs =
5678 Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
5679
5680
5682 Attrs =
5683 Attrs.removeFnAttribute(getLLVMContext(), llvm::Attribute::Convergent);
5684
5685
5686
5687
5688
5689
5692 !(TargetDecl && TargetDecl->hasAttr()) &&
5694 CallerDecl, CalleeDecl)) {
5695 Attrs =
5696 Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
5697 }
5698
5699
5701 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
5702 }
5703
5704
5705 bool CannotThrow;
5707
5708 CannotThrow = false;
5711
5712
5713
5714 CannotThrow = true;
5715 } else {
5716
5717 CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind);
5718
5719 if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
5720 if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
5721 CannotThrow = true;
5722 }
5723
5724
5725
5726
5727
5728 if (UnusedReturnSizePtr)
5730 UnusedReturnSizePtr);
5731
5732 llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
5733
5736
5737 if (SanOpts.has(SanitizerKind::KCFI) &&
5738 !isa_and_nonnull<FunctionDecl>(TargetDecl))
5740
5741
5743
5745 if (FD->hasAttr())
5746
5747 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);
5748
5749 AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
5750 Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5751
5752 AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
5753 Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5754
5755
5756 llvm::CallBase *CI;
5757 if (!InvokeDest) {
5758 CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
5759 } else {
5761 CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
5762 BundleList);
5764 }
5765 if (CI->getCalledFunction() && CI->getCalledFunction()->hasName() &&
5766 CI->getCalledFunction()->getName().starts_with("_Z4sqrt")) {
5768 }
5769 if (callOrInvoke)
5770 *callOrInvoke = CI;
5771
5772
5773
5774
5775 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
5776 if (const auto *A = FD->getAttr()) {
5777 if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
5778 Attrs = Attrs.addFnAttribute(getLLVMContext(), "guard_nocf");
5779 }
5780 }
5781
5782
5783 CI->setAttributes(Attrs);
5784 CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
5785
5786
5787
5788 if (!CI->getType()->isVoidTy())
5789 CI->setName("call");
5790
5792 CI = addConvergenceControlToken(CI);
5793
5794
5795 LargestVectorWidth =
5797
5798
5799
5800
5801 if (!CI->getCalledFunction())
5803 CI, CalleePtr);
5804
5805
5806
5808 AddObjCARCExceptionMetadata(CI);
5809
5810
5811 if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
5812 if (TargetDecl && TargetDecl->hasAttr())
5813 Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
5814 else if (IsMustTail) {
5815 if (getTarget().getTriple().isPPC()) {
5816 if (getTarget().getTriple().isOSAIX())
5821 else if (Call->isIndirectCall())
5823 else if (isa_and_nonnull<FunctionDecl>(TargetDecl)) {
5824 if (!cast<FunctionDecl>(TargetDecl)->isDefined())
5825
5826
5827
5829 {cast(TargetDecl), Loc});
5830 else {
5832 GlobalDecl(cast(TargetDecl)));
5833 if (llvm::GlobalValue::isWeakForLinker(Linkage) ||
5834 llvm::GlobalValue::isDiscardableIfUnused(Linkage))
5836 << 2;
5837 }
5838 }
5839 }
5840 }
5841 Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
5842 }
5843 }
5844
5845
5847 TargetDecl->hasAttr())
5849
5850
5851 if (TargetDecl && TargetDecl->hasAttr()) {
5852 llvm::ConstantInt *Line =
5854 llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(Line);
5855 llvm::MDTuple *MDT = llvm::MDNode::get(getLLVMContext(), {MD});
5856 CI->setMetadata("srcloc", MDT);
5857 }
5858
5859
5860
5861
5862
5863
5864 if (CI->doesNotReturn()) {
5865 if (UnusedReturnSizePtr)
5867
5868
5869 if (SanOpts.has(SanitizerKind::Unreachable)) {
5870
5871
5872 if (auto *F = CI->getCalledFunction())
5873 F->removeFnAttr(llvm::Attribute::NoReturn);
5874 CI->removeFnAttr(llvm::Attribute::NoReturn);
5875
5876
5877
5879 SanitizerKind::KernelAddress)) {
5880 SanitizerScope SanScope(this);
5881 llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
5882 Builder.SetInsertPoint(CI);
5883 auto *FnType = llvm::FunctionType::get(CGM.VoidTy, false);
5884 llvm::FunctionCallee Fn =
5887 }
5888 }
5889
5891 Builder.ClearInsertionPoint();
5892
5893
5894
5895
5897
5898
5900 }
5901
5902
5903
5904 if (IsMustTail) {
5906 ++it) {
5907 EHCleanupScope *Cleanup = dyn_cast<EHCleanupScope>(&*it);
5910 }
5911 if (CI->getType()->isVoidTy())
5912 Builder.CreateRetVoid();
5913 else
5915 Builder.ClearInsertionPoint();
5918 }
5919
5920
5921 if (swiftErrorTemp.isValid()) {
5924 }
5925
5926
5927
5930
5931
5932
5934
5935
5937
5938
5939
5940 if (IsVirtualFunctionPointerThunk) {
5942 } else {
5943 Ret = [&] {
5944 switch (RetAI.getKind()) {
5947
5949
5951 bool requiresExtract = isa<llvm::StructType>(CI->getType());
5952
5953 unsigned unpaddedIndex = 0;
5954 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5955 llvm::Type *eltType = coercionType->getElementType(i);
5957 continue;
5959 llvm::Value *elt = CI;
5960 if (requiresExtract)
5961 elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
5962 else
5963 assert(unpaddedIndex == 0);
5965 }
5966 [[fallthrough]];
5967 }
5968
5972 if (UnusedReturnSizePtr)
5974 return ret;
5975 }
5976
5978
5979
5981
5984 llvm::Type *RetIRTy = ConvertType(RetTy);
5989 llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
5990 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
5992 }
5994 break;
5996
5997
5998 llvm::Value *V = CI;
5999 if (V->getType() != RetIRTy)
6000 V = Builder.CreateBitCast(V, RetIRTy);
6002 }
6003 }
6004 }
6005
6006
6007
6008
6009 if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(RetIRTy)) {
6010 llvm::Value *V = CI;
6011 if (auto *ScalableSrcTy =
6012 dyn_cast<llvm::ScalableVectorType>(V->getType())) {
6013 if (FixedDstTy->getElementType() ==
6014 ScalableSrcTy->getElementType()) {
6015 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
6016 V = Builder.CreateExtractVector(FixedDstTy, V, Zero,
6017 "cast.fixed");
6019 }
6020 }
6021 }
6022
6024 bool DestIsVolatile = ReturnValue.isVolatile();
6027
6028 if (!DestPtr.isValid()) {
6030 DestIsVolatile = false;
6032 }
6033
6034
6035
6036
6038
6041 CI, StorePtr,
6042 llvm::TypeSize::getFixed(DestSize - RetAI.getDirectOffset()),
6043 DestIsVolatile);
6044 }
6045
6047 }
6048
6051 llvm_unreachable("Invalid ABI kind for return argument");
6052 }
6053
6054 llvm_unreachable("Unhandled ABIArgInfo::Kind");
6055 }();
6056 }
6057
6058
6059 if (Ret.isScalar() && TargetDecl) {
6060 AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
6061 AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
6062 }
6063
6064
6065
6066 for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
6067 LifetimeEnd.Emit(*this, {});
6068
6069 if (.isExternallyDestructed() &&
6072 RetTy);
6073
6074 return Ret;
6075}
6076
6083 }
6084
6085 return *this;
6086}
6087
6088
6089
6100}
static void appendParameterTypes(const CodeGenTypes &CGT, SmallVectorImpl< CanQualType > &prefix, SmallVectorImpl< FunctionProtoType::ExtParameterInfo > ¶mInfos, CanQual< FunctionProtoType > FPT)
Adds the formal parameters in FPT to the given prefix.
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
static uint64_t buildMultiCharMask(const SmallVectorImpl< uint64_t > &Bits, int Pos, int Size, int CharWidth, bool BigEndian)
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable 'self', remove it.
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type.
static const NonNullAttr * getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo)
Returns the attribute (either parameter attribute, or function attribute), which declares argument Ar...
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
static void setBitRange(SmallVectorImpl< uint64_t > &Bits, int BitOffset, int BitWidth, int CharWidth)
static SmallVector< CanQualType, 16 > getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
static bool isProvablyNull(llvm::Value *addr)
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
static void eraseUnusedBitCasts(llvm::Instruction *insn)
static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method)
static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs, const LangOptions &LangOpts, const NoBuiltinAttr *NBA=nullptr)
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that's being passed call-by-writeback.
static void overrideFunctionFeaturesWithTargetFeatures(llvm::AttrBuilder &FuncAttr, const llvm::Function &F, const TargetOptions &TargetOpts)
Merges target-features from TargetOpts and F, and sets the result in FuncAttr.
static const CGFunctionInfo & arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, CodeGenModule &CGM, const CallArgList &args, const FunctionType *fnType, unsigned numExtraRequiredArgs, bool chainCall)
Arrange a call as unto a free function, except possibly with an additional number of formal parameter...
static llvm::Value * CreateCoercedLoad(Address Src, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from.
static llvm::SmallVector< FunctionProtoType::ExtParameterInfo, 16 > getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D, bool IsWindows)
static int getExpansionSize(QualType Ty, const ASTContext &Context)
static CanQual< FunctionProtoType > GetFormalType(const CXXMethodDecl *MD)
Returns the canonical formal type of the given C++ method.
static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types, const llvm::DataLayout &DL, const ABIArgInfo &AI, bool CheckCoerce=true)
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
static void addDenormalModeAttrs(llvm::DenormalMode FPDenormalMode, llvm::DenormalMode FP32DenormalMode, llvm::AttrBuilder &FuncAttrs)
Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the requested denormal behavior,...
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, const CallArgList &CallArgs)
static bool isProvablyNonNull(Address Addr, CodeGenFunction &CGF)
static llvm::Value * emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value)
An argument came in as a promoted argument; demote it back to its declared type.
static std::pair< llvm::Value *, bool > CoerceScalableToFixed(CodeGenFunction &CGF, llvm::FixedVectorType *ToTy, llvm::ScalableVectorType *FromTy, llvm::Value *V, StringRef Name="")
static SmallVector< CanQualType, 16 > getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit param...
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > ¶mInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType, bool IsReturn)
Test if it's legal to apply nofpclass for the given parameter type and it's lowered IR type.
static void getTrivialDefaultFunctionAttributes(StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, bool AttrOnCallSite, llvm::AttrBuilder &FuncAttrs)
static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts)
Return the nofpclass mask that can be applied to floating-point parameters.
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
static bool IsArgumentMaybeUndef(const Decl *TargetDecl, unsigned NumRequiredArgs, unsigned ArgNo)
Check if the argument of a function has maybe_undef attribute.
static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC, ArrayRef< QualType > ArgTypes)
static std::unique_ptr< TypeExpansion > getTypeExpansion(QualType Ty, const ASTContext &Context)
static RawAddress CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, CharUnits MinAlign, const Twine &Name="tmp")
Create a temporary allocation for the purposes of coercion.
static void setUsedBits(CodeGenModule &, QualType, int, SmallVectorImpl< uint64_t > &)
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of byte...
static llvm::Value * emitAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Emit an ARC autorelease of the result of a function.
static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback)
Emit the actual writing-back of a writeback.
static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy, const Decl *TargetDecl)
static void addMergableDefaultFunctionAttributes(const CodeGenOptions &CodeGenOpts, llvm::AttrBuilder &FuncAttrs)
Add default attributes to a function, which have merge semantics under -mlink-builtin-bitcode and sho...
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
static void AddAttributesFromOMPAssumes(llvm::AttrBuilder &FuncAttrs, const Decl *Callee)
static unsigned getMaxVectorWidth(const llvm::Type *Ty)
CodeGenFunction::ComplexPairTy ComplexPairTy
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
llvm::MachO::Target Target
static bool hasFeature(StringRef Feature, const LangOptions &LangOpts, const TargetInfo &Target)
Determine whether a translation unit built using the current language options has the given feature.
static QualType getParamType(Sema &SemaRef, ArrayRef< ResultCandidate > Candidates, unsigned N)
Get the type of the Nth parameter from a given set of overload candidates.
static QualType getPointeeType(const MemRegion *R)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
const ConstantArrayType * getAsConstantArrayType(QualType T) const
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one.
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CallingConv getDefaultCallingConvention(bool IsVariadic, bool IsCXXMethod, bool IsBuiltin=false) const
Retrieves the default calling convention for the current target.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
QualType getObjCSelType() const
Retrieve the type that corresponds to the predefined Objective-C 'SEL' type.
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
TypeInfoChars getTypeInfoDataSizeInChars(QualType T) const
TypeInfoChars getTypeInfoInChars(const Type *T) const
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const TargetInfo & getTargetInfo() const
QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const
Return the uniqued reference to the type for an address space qualified type with the specified type ...
uint64_t getConstantArrayElementCount(const ConstantArrayType *CA) const
Return number of constant array elements.
QualType getIntPtrType() const
Return a type compatible with "intptr_t" (C99 7.18.1.4), as defined by the target.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Attr - This represents one attribute.
const FunctionProtoType * getFunctionType() const
getFunctionType - Return the underlying function type for this block.
This class is used for builtin types like 'int'.
Represents a base class of a C++ class.
QualType getType() const
Retrieves the type of the base class.
Represents a C++ constructor within a class.
Represents a C++ destructor within a class.
Represents a static or instance method of a struct/union/class.
bool isImplicitObjectMemberFunction() const
[C++2b][dcl.fct]/p7 An implicit object member function is a non-static member function without an exp...
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Qualifiers getMethodQualifiers() const
Represents a C++ struct/union/class.
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
SourceLocation getBeginLoc() const LLVM_READONLY
static CanQual< Type > CreateUnsafe(QualType Other)
Builds a canonical type from a QualType.
CanProxy< U > castAs() const
CanQual< T > getUnqualifiedType() const
Retrieve the unqualified form of this type.
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
CharUnits - This is an opaque type for sizes expressed in character units.
bool isZero() const
isZero - Test whether the quantity equals zero.
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align. Beware: llvm::Align assumes power-of-two 8-bit bytes.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
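The CharUnits entries above are the byte-granularity size/alignment type used throughout this file. As a minimal sketch (a hypothetical helper, not part of CGCall.cpp, assuming clang/AST/CharUnits.h), the usual round trip between a raw byte count and llvm::Align looks like this:
#include "clang/AST/CharUnits.h"
// Illustration only: wrap a raw byte count and hand it to LLVM as an
// alignment, treating zero as one byte.
static llvm::Align alignFromBytes(int64_t Bytes) {
  clang::CharUnits CU = clang::CharUnits::fromQuantity(Bytes); // raw -> CharUnits
  if (CU.isZero())                  // equal to CharUnits::Zero()
    CU = clang::CharUnits::One();
  return CU.getAsAlign();           // requires a power-of-two quantity
}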
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
llvm::DenormalMode FPDenormalMode
The floating-point denormal mode to use.
static StringRef getFramePointerKindName(FramePointerKind Kind)
std::vector< std::string > Reciprocals
llvm::DenormalMode FP32DenormalMode
The floating-point denormal mode to use, for float.
std::string TrapFuncName
If not an empty string, trap intrinsics are lowered to calls to this function instead of to trap inst...
std::vector< std::string > DefaultFunctionAttrs
std::string PreferVectorWidth
The preferred width for auto-vectorization transforms.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
unsigned getInAllocaFieldIndex() const
bool getIndirectByVal() const
llvm::StructType * getCoerceAndExpandType() const
bool getIndirectRealign() const
void setCoerceToType(llvm::Type *T)
llvm::Type * getUnpaddedCoerceAndExpandType() const
bool getCanBeFlattened() const
unsigned getDirectOffset() const
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType)
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calli...
llvm::Type * getPaddingType() const
bool getPaddingInReg() const
unsigned getDirectAlign() const
unsigned getIndirectAddrSpace() const
@ Extend
Extend - Valid only for integer argument types.
@ Ignore
Ignore - Ignore the argument (treat as void).
@ IndirectAliased
IndirectAliased - Similar to Indirect, but the pointer may be to an object that is otherwise referenc...
@ Expand
Expand - Only valid for aggregate argument types.
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
@ CoerceAndExpand
CoerceAndExpand - Only valid for aggregate argument types.
@ Direct
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
bool isCoerceAndExpand() const
unsigned getInAllocaIndirect() const
llvm::Type * getCoerceToType() const
bool isIndirectAliased() const
bool isSRetAfterThis() const
bool canHaveCoerceToType() const
CharUnits getIndirectAlign() const
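The ABIArgInfo kinds listed above (Direct, Extend, Indirect, IndirectAliased, Ignore, Expand, InAlloca, CoerceAndExpand) are exactly what the argument-lowering code in this file switches over. A hedged sketch of such a dispatch, using only the accessors documented here; the helper itself is hypothetical:
// Illustration only: summarize how one argument will be materialized.
// (llvm_unreachable comes from llvm/Support/ErrorHandling.h.)
static const char *describeArgPassing(const clang::CodeGen::ABIArgInfo &AI) {
  using clang::CodeGen::ABIArgInfo;
  switch (AI.getKind()) {
  case ABIArgInfo::Direct:
  case ABIArgInfo::Extend:
    return "direct (possibly coerced to getCoerceToType())";
  case ABIArgInfo::Indirect:
  case ABIArgInfo::IndirectAliased:
    return "indirect via hidden pointer (see getIndirectAlign())";
  case ABIArgInfo::InAlloca:
    return "field of the inalloca struct (see getInAllocaFieldIndex())";
  case ABIArgInfo::Ignore:
    return "ignored";
  case ABIArgInfo::Expand:
    return "expanded into multiple IR arguments";
  case ABIArgInfo::CoerceAndExpand:
    return "coerce-and-expand";
  }
  llvm_unreachable("unknown ABIArgInfo kind");
}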
virtual RValue EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty, AggValueSlot Slot) const
Emit the target dependent code to load a value of type Ty from the __builtin_ms_va_list pointed to by VAListAddr.
virtual RValue EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty, AggValueSlot Slot) const =0
EmitVAArg - Emit the target dependent code to load a value of type Ty from the va_list pointed to by VAListAddr.
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
llvm::Value * getBasePointer() const
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
CharUnits getAlignment() const
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
llvm::StringRef getName() const
Return the IR name of the pointer value.
llvm::PointerType * getType() const
Return the type of the pointer value.
Address getAddress() const
void setExternallyDestructed(bool destructed=true)
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
const BlockExpr * BlockExpression
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
llvm::Value * CreateIsNull(Address Addr, const Twine &Name="")
Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::LoadInst * CreateFlagLoad(llvm::Value *Addr, const llvm::Twine &Name="")
Emit a load from an i1 flag variable.
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
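Address plus the CGBuilderTy helpers above are how this file performs field accesses without losing alignment information. A small sketch under the assumption of a live CodeGenFunction; the helper is hypothetical, not an existing entry point:
// Illustration only: load the Idx-th field of a struct stored at StructAddr.
static llvm::Value *loadStructField(clang::CodeGen::CodeGenFunction &CGF,
                                    clang::CodeGen::Address StructAddr,
                                    unsigned Idx) {
  clang::CodeGen::Address Field =
      CGF.Builder.CreateStructGEP(StructAddr, Idx, "field.addr");
  // CreateLoad keeps the alignment carried by Field rather than guessing.
  return CGF.Builder.CreateLoad(Field, "field.val");
}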
Implements C++ ABI-specific code generation functions.
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'th...
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
virtual CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, Address This, llvm::Type *Ty, SourceLocation Loc)=0
Build a virtual function pointer in the ABI-specific way.
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
virtual const CXXRecordDecl * getThisArgumentTypeForMethod(GlobalDecl GD)
Get the type of the implicit "this" parameter used by a method.
virtual AddedStructorArgCounts buildStructorSignature(GlobalDecl GD, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters.
Abstract information about a function or function prototype.
const GlobalDecl getCalleeDecl() const
const FunctionProtoType * getCalleeFunctionProtoType() const
All available information about a concrete callee.
CGCallee prepareConcreteCallee(CodeGenFunction &CGF) const
If this is a delayed callee computation of some sort, prepare a concrete callee.
Address getThisAddress() const
const CallExpr * getVirtualCallExpr() const
llvm::Value * getFunctionPointer() const
llvm::FunctionType * getVirtualFunctionType() const
const CGPointerAuthInfo & getPointerAuthInfo() const
GlobalDecl getVirtualMethodDecl() const
void addHeapAllocSiteMetadata(llvm::CallBase *CallSite, QualType AllocatedTy, SourceLocation Loc)
Add heapallocsite metadata for MSAllocator calls.
CGFunctionInfo - Class to encapsulate the information about a function definition.
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
FunctionType::ExtInfo getExtInfo() const
bool isInstanceMethod() const
ABIArgInfo & getReturnInfo()
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
void Profile(llvm::FoldingSetNodeID &ID)
const_arg_iterator arg_begin() const
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
CanQualType getReturnType() const
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, bool delegateCall, const FunctionType::ExtInfo &extInfo, ArrayRef< ExtParameterInfo > paramInfos, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
bool isCmseNSCall() const
bool isDelegateCall() const
MutableArrayRef< ArgInfo > arguments()
const_arg_iterator arg_end() const
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
ExtParameterInfo getExtParameterInfo(unsigned argIndex) const
CharUnits getArgStructAlignment() const
unsigned arg_size() const
RequiredArgs getRequiredArgs() const
unsigned getNumRequiredArgs() const
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
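CGFunctionInfo ties these pieces together: one ABIArgInfo for the return value plus one per argument, iterated with arg_begin()/arg_end(). A sketch (hypothetical helper) that counts arguments passed through a hidden pointer:
// Illustration only: count arguments the ABI passes indirectly.
static unsigned countIndirectArgs(const clang::CodeGen::CGFunctionInfo &FI) {
  unsigned N = 0;
  for (auto I = FI.arg_begin(), E = FI.arg_end(); I != E; ++I)
    if (I->info.isIndirect() || I->info.isIndirectAliased())
      ++N;
  return N;
}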
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
CallArgList - Type for representing both the value and type of arguments in a call.
llvm::Instruction * getStackBase() const
void addUncopiedAggregate(LValue LV, QualType type)
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
ArrayRef< CallArgCleanup > getCleanupsToDeactivate() const
bool hasWritebacks() const
void add(RValue rvalue, QualType type)
bool isUsingInAlloca() const
Returns whether we're using an inalloca struct to pass arguments in memory.
void allocateArgumentMemory(CodeGenFunction &CGF)
void freeArgumentMemory(CodeGenFunction &CGF) const
writeback_const_range writebacks() const
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse, const Expr *writebackExpr=nullptr, llvm::Value *lifetimeSz=nullptr)
static ParamValue forIndirect(Address addr)
static ParamValue forDirect(llvm::Value *value)
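CallArgList is the call-site counterpart: each source-level argument is recorded as an RValue together with its QualType, and writebacks and cleanup deactivations ride along. A minimal sketch of appending one already-emitted scalar; the helper and the value V are assumptions for illustration:
// Illustration only: append a scalar that was already emitted (e.g. by
// EmitScalarExpr) to the argument list for a call.
static void addScalarArg(clang::CodeGen::CallArgList &Args, llvm::Value *V,
                         clang::QualType Ty) {
  Args.add(clang::CodeGen::RValue::get(V), Ty); // value plus its source type
}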
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
EHScopeStack::stable_iterator CurrentCleanupScopeDepth
void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize, bool DstIsVolatile)
Create a store to Dst from Src, coercing the value to the destination's element type as needed.
llvm::Value * EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr)
void EmitPointerAuthOperandBundle(const CGPointerAuthInfo &Info, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
SanitizerSet SanOpts
Sanitizers enabled for this function.
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
static bool hasScalarEvaluationKind(QualType T)
bool isCleanupPadScope() const
Returns true while emitting a cleanuppad.
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
void EmitKCFIOperandBundle(const CGCallee &Callee, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
LValue EmitHLSLOutArgExpr(const HLSLOutArgExpr *E, CallArgList &Args, QualType Ty)
bool shouldUseFusedARCCalls()
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy)
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
bool isSEHTryScope() const
Returns true inside SEH __try blocks.
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
const LangOptions & getLangOpts() const
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
bool InNoConvergentAttributedStmt
True if the current statement has noconvergent attribute.
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
const CodeGen::CGBlockInfo * BlockInfo
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
void callCStructDestructor(LValue Dst)
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
llvm::Type * ConvertTypeForMem(QualType T)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
llvm::BasicBlock * getUnreachableBlock()
bool currentFunctionUsesSEHTry() const
JumpDest ReturnBlock
ReturnBlock - Unified return block.
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
@ ForceLeftToRight
Language semantics require left-to-right evaluation.
@ ForceRightToLeft
Language semantics require right-to-left evaluation.
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment without...
const TargetInfo & getTarget() const
llvm::Value * EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy, QualType RTy)
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
void EmitReturnValueCheck(llvm::Value *RV)
Emit a test that checks if the return value RV is nonnull.
llvm::BasicBlock * getInvokeDest()
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
CGDebugInfo * getDebugInfo()
Address EmitVAListRef(const Expr *E)
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
ASTContext & getContext() const
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and returns ...
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
void PopCleanupBlock(bool FallThroughIsBranchThrough=false, bool ForDeactivation=false)
PopCleanupBlock - Will pop the cleanup entry on the stack and process all branch fixups.
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Type * ConvertType(QualType T)
void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args)
CodeGenTypes & getTypes() const
void EmitWritebacks(const CallArgList &Args)
EmitWritebacks - Emit the writebacks recorded for the given call arguments.
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
static bool hasAggregateEvaluationKind(QualType T)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
const CallExpr * MustTailCall
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo)
EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
llvm::Instruction * CurrentFuncletPad
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
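Several CodeGenFunction helpers above cooperate in a pattern this file uses repeatedly: spill a value into a correctly typed temporary and reload it as an RValue. A hedged sketch, assuming a live CodeGenFunction and an already-emitted scalar:
// Illustration only: store V into a fresh temporary of type Ty, then read it
// back as an RValue (relies on RawAddress converting to Address).
static clang::CodeGen::RValue
spillAndReload(clang::CodeGen::CodeGenFunction &CGF, llvm::Value *V,
               clang::QualType Ty, clang::SourceLocation Loc) {
  clang::CodeGen::Address Tmp = CGF.CreateMemTemp(Ty, "spill.tmp");
  CGF.EmitStoreOfScalar(V, Tmp, /*Volatile=*/false, Ty);
  return CGF.convertTempToRValue(Tmp, Ty, Loc);
}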
This class organizes the cross-function state that is used while generating LLVM code.
llvm::MDNode * getNoObjCARCExceptionsMetadata()
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
const ABIInfo & getABIInfo()
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses 'fpret' when used as a return type.
DiagnosticsEngine & getDiags() const
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
const LangOptions & getLangOpts() const
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CodeGenTypes & getTypes()
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
void addUndefinedGlobalForTailCall(std::pair< const FunctionDecl *, SourceLocation > Global)
ObjCEntrypoints & getObjCEntrypoints() const
void Error(SourceLocation loc, StringRef error)
Emit a general error that something can't be done.
bool shouldEmitConvergenceTokens() const
CGCXXABI & getCXXABI() const
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses 'fp2ret' when used as a return type.
llvm::GlobalVariable::LinkageTypes getFunctionLinkage(GlobalDecl GD)
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when 'sret' is used as a return type.
bool ReturnTypeHasInReg(const CGFunctionInfo &FI)
Return true iff the given type has inreg set.
void AdjustMemoryAttribute(StringRef Name, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs)
Adjust the Memory attribute to ensure that the backend gets the right attribute.
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs, unsigned &CallingConv, bool AttrOnCallSite, bool IsThunk)
Get the LLVM attributes and calling convention to use for a particular function type.
ASTContext & getContext() const
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI)
Return true iff the given type uses 'sret' when used as a return type.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
void addDefaultFunctionDefinitionAttributes(llvm::AttrBuilder &attrs)
Like the overload taking a Function &, but intended specifically for frontends that want to build on ...
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::LLVMContext & getLLVMContext()
CharUnits getMinimumObjectSize(QualType Ty)
Returns the minimum object size for an object of the given type.
bool MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType) const
Whether this function's return type has no side effects, and thus may be trivially discarded if it is...
void valueProfile(CGBuilderTy &Builder, uint32_t ValueKind, llvm::Instruction *ValueSite, llvm::Value *ValuePtr)
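CreateRuntimeFunction is how this file obtains declarations for runtime helpers before calling them with EmitRuntimeCall or EmitNounwindRuntimeCall. A sketch; the helper name __example_hook is invented purely for illustration:
// Illustration only: declare (or reuse) 'void __example_hook(ptr)'.
static llvm::FunctionCallee getExampleHook(clang::CodeGen::CodeGenModule &CGM) {
  llvm::LLVMContext &Ctx = CGM.getLLVMContext();
  llvm::Type *PtrTy = llvm::PointerType::getUnqual(Ctx);
  auto *FTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx), {PtrTy},
                                      /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(FTy, "__example_hook");
}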
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
const CGFunctionInfo & arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP, const CXXMethodDecl *MD)
Arrange the argument and result information for a call to an unknown C++ non-static member function o...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
CGCXXABI & getCXXABI() const
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
ASTContext & getContext() const
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, FnInfoOpts opts, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
const CGFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > Ty)
Arrange the argument and result information for a value of the given freestanding function type.
CanQualType DeriveThisType(const CXXRecordDecl *RD, const CXXMethodDecl *MD)
Derives the 'this' type for codegen purposes, i.e.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for the given function info.
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
bool isFuncTypeConvertible(const FunctionType *FT)
isFuncTypeConvertible - Utility to check whether a function type can be converted to an LLVM type (i....
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function is essentially a free function with an extra implicit argument.
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
const CGFunctionInfo & arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args)
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
unsigned getTargetAddressSpace(QualType T) const
void getExpandedTypes(QualType Ty, SmallVectorImpl< llvm::Type * >::iterator &TI)
getExpandedTypes - Expand the type Ty into the LLVM argument types it would be passed as.
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Objective-C methods are C functions with some implicit parameters.
llvm::LLVMContext & getLLVMContext()
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes 'this' as the first parameter followed by varargs.
const CGFunctionInfo & arrangeCXXMethodCall(const CallArgList &args, const FunctionProtoType *type, RequiredArgs required, unsigned numPrefixArgs)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments.
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
const CGFunctionInfo & arrangeFunctionDeclaration(const FunctionDecl *FD)
Free functions are functions that are compatible with an ordinary C function pointer type.
const CGFunctionInfo & arrangeBlockFunctionDeclaration(const FunctionProtoType *type, const FunctionArgList &args)
Block invocation functions are C functions with an implicit parameter.
unsigned ClangCallConvToLLVMCallConv(CallingConv CC)
Convert clang calling convention to LLVM calling convention.
llvm::Type * GetFunctionTypeForVTable(GlobalDecl GD)
GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable, given a CXXMethodDecl.
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs=true)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to ...
const CGFunctionInfo & arrangeCXXStructorDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeMSCtorClosure(const CXXConstructorDecl *CD, CXXCtorType CT)
const CGFunctionInfo & arrangeCall(const CGFunctionInfo &declFI, const CallArgList &args)
Given a function info for a declaration, return the function info for a call with the given arguments...
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type 'void ()'.
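All of the CodeGenTypes arrange* entries above funnel into a CGFunctionInfo, which GetFunctionType then lowers to an llvm::FunctionType. A two-step sketch of that flow for an ordinary function declaration (hypothetical helper):
// Illustration only: compute the IR signature implied by the ABI decisions
// recorded for a C/C++ function declaration.
static llvm::FunctionType *lowerFunctionType(clang::CodeGen::CodeGenTypes &CGT,
                                             const clang::FunctionDecl *FD) {
  const clang::CodeGen::CGFunctionInfo &FI = CGT.arrangeFunctionDeclaration(FD);
  return CGT.GetFunctionType(FI);
}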
A cleanup scope which generates the cleanup blocks lazily.
EHScopeStack::Cleanup * getCleanup()
Information for lazily generating a cleanup.
virtual bool isRedundantBeforeReturn()
A saved depth on the scope stack.
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
iterator end() const
Returns an iterator pointing to the outermost EH scope.
iterator find(stable_iterator save) const
Turn a stable reference to a scope depth into an unstable pointer to the EH stack.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
LValue - This represents an lvalue reference.
bool isVolatileQualified() const
LangAS getAddressSpace() const
CharUnits getAlignment() const
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Address getAddress() const
ARCPreciseLifetime_t isARCPreciseLifetime() const
Qualifiers::ObjCLifetime getObjCLifetime() const
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
static RValue get(llvm::Value *V)
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
bool isVolatileQualified() const
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
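RValue is the small tagged value used for call arguments and results above: a scalar, a complex real/imag pair, or an aggregate Address. A brief sketch of the scalar and complex round trips (hypothetical helper):
// Illustration only: wrap and unwrap the scalar and complex RValue flavors.
static void rvalueRoundTrip(llvm::Value *Scalar, llvm::Value *Re,
                            llvm::Value *Im) {
  using clang::CodeGen::RValue;
  RValue S = RValue::get(Scalar);
  llvm::Value *Back = S.getScalarVal(); // same llvm::Value that went in
  RValue C = RValue::getComplex(Re, Im);
  std::pair<llvm::Value *, llvm::Value *> Parts = C.getComplexVal();
  (void)Back;
  (void)Parts;
}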
An abstract representation of an aligned address.
CharUnits getAlignment() const
Return the alignment of this pointer.
llvm::Type * getElementType() const
Return the type of the values stored in this address.
llvm::Value * getPointer() const
static RawAddress invalid()
A class for recording the number of arguments that a function signature requires.
bool allowsOptionalArgs() const
unsigned getNumRequiredArgs() const
static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype, unsigned additional)
Compute the arguments required by the given formal prototype, given that there may be some additional...
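RequiredArgs records how many leading arguments a call must supply; anything beyond that is variadic. A sketch using forPrototypePlus, where the single extra prefix argument (for example an implicit 'this') is an assumption made for illustration:
// Illustration only: required-argument accounting for a prototype preceded by
// one implicit prefix argument.
static bool isVariadicWithPrefix(const clang::FunctionProtoType *Proto) {
  clang::CodeGen::RequiredArgs Req =
      clang::CodeGen::RequiredArgs::forPrototypePlus(Proto, /*additional=*/1);
  // getNumRequiredArgs() is only meaningful when optional args are allowed.
  return Req.allowsOptionalArgs() &&
         Req.getNumRequiredArgs() == Proto->getNumParams() + 1;
}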
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
virtual bool doesReturnSlotInterfereWithArgs() const
doesReturnSlotInterfereWithArgs - Return true if the target uses an argument slot for an 'sret' type.
virtual bool wouldInliningViolateFunctionCallABI(const FunctionDecl *Caller, const FunctionDecl *Callee) const
Returns true if inlining the function call would produce incorrect code for the current target and sh...
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
virtual void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller, const FunctionDecl *Callee, const CallArgList &Args, QualType ReturnType) const
Any further codegen related checks that need to be done on a function call in a target specific manne...
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
static void initBranchProtectionFnAttributes(const TargetInfo::BranchProtectionInfo &BPI, llvm::AttrBuilder &FuncAttrs)
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped function under the given calling convention should use t...
Complex values, per C99 6.2.5p11.
Represents the canonical version of C arrays with a specified constant size.
bool constructsVirtualBase() const
Returns true if the constructed base class is a virtual base class subobject of this declaration's cl...
DeclContext - This is used only as base class of specific decl types that can act as declaration cont...
Decl - This represents one declaration (or definition), e.g.
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
DeclContext * getDeclContext()
SourceLocation getBeginLoc() const LLVM_READONLY
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
This represents one expression.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant.
Represents a member of a struct/union/class.
bool isBitField() const
Determines whether this field is a bitfield.
bool isUnnamedBitField() const
Determines whether this is an unnamed bitfield.
bool isZeroLengthBitField() const
Is this a zero-length bit-field? Such bit-fields aren't really bit-fields at all and instead act as a...
Represents a function declaration or definition.
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Represents a K&R-style 'int foo()' function, which has no information available about its arguments.
Represents a prototype with parameter type info, e.g.
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
unsigned getNumParams() const
unsigned getAArch64SMEAttributes() const
Return a bitmask describing the SME attributes on the function type, see AArch64SMETypeAttributes for...
bool isNothrow(bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
bool hasExtParameterInfos() const
Is there any interesting extra information for any of the parameters of this function type?
Wrapper for source info for functions.
A class which abstracts out some details necessary for making a call.
ExtInfo withCallingConv(CallingConv cc) const
CallingConv getCC() const
ExtInfo withProducesResult(bool producesResult) const
bool getCmseNSCall() const
bool getNoCfCheck() const
unsigned getRegParm() const
bool getNoCallerSavedRegs() const
bool getHasRegParm() const
bool getProducesResult() const
Interesting information about a specific parameter that can't simply be reflected in parameter's type...
ParameterABI getABI() const
Return the ABI treatment of this parameter.
ExtParameterInfo withIsNoEscape(bool NoEscape) const
FunctionType - C99 6.7.5.3 - Function Declarators.
ExtInfo getExtInfo() const
static ArmStateValue getArmZT0State(unsigned AttrBits)
static ArmStateValue getArmZAState(unsigned AttrBits)
QualType getReturnType() const
@ SME_PStateSMEnabledMask
@ SME_PStateSMCompatibleMask
@ SME_AgnosticZAStateMask
GlobalDecl - represents a global declaration.
CXXCtorType getCtorType() const
const Decl * getDecl() const
This class represents temporary values used to represent inout and out arguments in HLSL.
Description of a constructor that was inherited from a base class.
ConstructorUsingShadowDecl * getShadowDecl() const
@ FPE_Ignore
Assume that floating-point exceptions are masked.
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
std::vector< std::string > NoBuiltinFuncs
A list of all -fno-builtin-* function names (e.g., memset).
FPExceptionModeKind getDefaultExceptionMode() const
bool isNoBuiltinFunc(StringRef Name) const
Is this a libc/libm function that is no longer recognized as a builtin because a -fno-builtin-* optio...
bool assumeFunctionsAreConvergent() const
Represents a matrix type, as defined in the Matrix Types clang extensions.
Describes a module or submodule.
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
ObjCCategoryDecl - Represents a category declaration.
ObjCIndirectCopyRestoreExpr - Represents the passing of a function argument by indirect copy-restore ...
bool shouldCopy() const
shouldCopy - True if we should do the 'copy' part of the copy-restore.
Represents an ObjC class declaration.
ObjCMethodDecl - Represents an instance or class method declaration.
ImplicitParamDecl * getSelfDecl() const
ArrayRef< ParmVarDecl * > parameters() const
bool isDirectMethod() const
True if the method is tagged as objc_direct.
QualType getReturnType() const
Represents a parameter to a function.
PointerType - C99 6.7.5.1 - Pointer Declarators.
QualType getPointeeType() const
A (possibly-)qualified type.
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
LangAS getAddressSpace() const
Return the address space of this type.
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
QualType getCanonicalType() const
bool isConstQualified() const
Determine whether this type is const-qualified.
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
LangAS getAddressSpace() const
Represents a struct/union/class.
bool hasFlexibleArrayMember() const
field_iterator field_end() const
field_range fields() const
bool isParamDestroyedInCallee() const
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
field_iterator field_begin() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
RecordDecl * getDecl() const
Base for LValueReferenceType and RValueReferenceType.
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
bool areArgsDestroyedLeftToRightInCallee() const
Are arguments to a call destroyed left to right in the callee? This is a fundamental language change,...
bool isMicrosoft() const
Is this ABI an MSVC-compatible ABI?
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool useObjCFPRetForRealType(FloatModeKind T) const
Check whether the given real type should use the "fpret" flavor of Objective-C message passing on thi...
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
bool useObjCFP2RetForComplexLongDouble() const
Check whether _Complex long double should use the "fp2ret" flavor of Objective-C message passing on t...
Options for controlling the target.
std::vector< std::string > Features
The list of target specific features to enable or disable – this should be a list of strings starting...
std::string TuneCPU
If given, the name of the target CPU to tune code for.
std::string CPU
If given, the name of the target CPU to generate code for.
The base class of the type hierarchy.
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
bool isBlockPointerType() const
bool isIncompleteArrayType() const
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6....
bool isPointerType() const
CanQualType getCanonicalTypeUnqualified() const
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
const T * castAs() const
Member-template castAs<specific type>.
bool isReferenceType() const
bool isScalarType() const
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
bool isBitIntType() const
QualType getCanonicalTypeInternal() const
bool isMemberPointerType() const
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
bool isObjectType() const
Determine whether this type is an object type.
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g....
bool isAnyPointerType() const
const T * getAs() const
Member-template getAs<specific type>.
bool isNullPtrType() const
bool isObjCRetainableType() const
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Represents a call to the builtin function __builtin_va_arg.
bool isMicrosoftABI() const
Returns whether this is really a Win64 ABI va_arg expression.
const Expr * getSubExpr() const
Represents a variable declaration or definition.
QualType::DestructionKind needsDestruction(const ASTContext &Ctx) const
Would the destruction of this variable have any effect, and if so, what kind?
Represents a GCC generic vector type.
Defines the clang::TargetInfo interface.
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
void mergeDefaultFunctionDefinitionAttributes(llvm::Function &F, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, const TargetOptions &TargetOpts, bool WillInternalize)
Adds attributes to F according to our CodeGenOpts and LangOpts, as though we had emitted it ourselves...
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code, ArrayRef< tooling::Range > Ranges, StringRef FileName="")
Clean up any erroneous/redundant code in the given Ranges in Code.
bool This(InterpState &S, CodePtr OpPC)
bool Zero(InterpState &S, CodePtr OpPC)
bool Load(InterpState &S, CodePtr OpPC)
bool Ret(InterpState &S, CodePtr &PC)
RangeSelector name(std::string ID)
Given a node with a "name", (like NamedDecl, DeclRefExpr, CxxCtorInitializer, and TypeLoc) selects th...
The JSON file list parser is used to communicate input to InstallAPI.
CXXCtorType
C++ constructor types.
@ Ctor_DefaultClosure
Default closure variant of a ctor.
@ Ctor_CopyingClosure
Copying closure variant of a ctor.
@ Ctor_Complete
Complete object ctor.
bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType)
bool isInstanceMethod(const Decl *D)
@ NonNull
Values of this type can never be null.
@ OK_Ordinary
An ordinary object is located at an address in memory.
@ Vector
'vector' clause, allowed on 'loop', Combined, and 'routine' directives.
Linkage
Describes the different kinds of linkage (C++ [basic.link], C99 6.2.2) that an entity may have.
@ Result
The result type of a method or function.
@ SwiftAsyncContext
This parameter (which must have pointer type) uses the special Swift asynchronous context-pointer ABI...
@ SwiftErrorResult
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI trea...
@ Ordinary
This parameter uses ordinary ABI rules for its type.
@ SwiftIndirectResult
This parameter (which must have pointer type) is a Swift indirect result parameter.
@ SwiftContext
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment.
@ Dtor_Complete
Complete object dtor.
@ CanPassInRegs
The argument of this type can be passed directly in registers.
const FunctionProtoType * T
CallingConv
CallingConv - Specifies the calling convention that a function uses.
__DEVICE__ _Tp arg(const std::complex< _Tp > &__c)
Structure with information about how a bitfield should be accessed.
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
Similar to AddedStructorArgs, but only notes the number of additional arguments.
llvm::Value * ToUse
A value to "use" after the writeback, or null.
LValue Source
The original argument.
Address Temporary
The temporary alloca.
const Expr * WritebackExpr
An Expression (optional) that performs the writeback with any required casting.
LValue getKnownLValue() const
RValue getKnownRValue() const
void copyInto(CodeGenFunction &CGF, Address A) const
RValue getRValue(CodeGenFunction &CGF) const
llvm::BasicBlock * getBlock() const
llvm::IntegerType * Int64Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::CallingConv::ID getRuntimeCC() const
llvm::IntegerType * SizeTy
llvm::IntegerType * Int32Ty
llvm::IntegerType * IntPtrTy
llvm::PointerType * Int8PtrTy
CharUnits getPointerAlign() const
LangAS getASTAllocaAddressSpace() const
bool isMSVCXXPersonality() const
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
llvm::Function * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
llvm::Function * objc_retain
id objc_retain(id);
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm used to mark that the return value of a call will be immediately retained.
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Iterator for iterating over Stmt * arrays that contain only T *.