LLVM: lib/Target/AArch64/GISel/AArch64CallLowering.cpp Source File (original) (raw)

1

2

3

4

5

6

7

8

9

10

11

12

13

14

47#include

48#include

49#include

50

51#define DEBUG_TYPE "aarch64-call-lowering"

52

53using namespace llvm;

54using namespace AArch64GISelUtils;

55

57

60

62 MVT &LocVT) {

63

64

65

66

67

68

69 if (OrigVT == MVT::i1 || OrigVT == MVT::i8)

70 ValVT = LocVT = MVT::i8;

71 else if (OrigVT == MVT::i16)

72 ValVT = LocVT = MVT::i16;

73}

74

75

78 return (ValVT == MVT::i8 || ValVT == MVT::i16) ? LLT(ValVT)

80}

81

82namespace {

83

84struct AArch64IncomingValueAssigner

86 AArch64IncomingValueAssigner(CCAssignFn *AssignFn_,

88 : IncomingValueAssigner(AssignFn_, AssignFnVarArg_) {}

89

90 bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,

93 CCState &State) override {

95 return IncomingValueAssigner::assignArg(ValNo, OrigVT, ValVT, LocVT,

96 LocInfo, Info, Flags, State);

97 }

98};

99

100struct AArch64OutgoingValueAssigner

103

104

105

106

107 bool IsReturn;

108

109 AArch64OutgoingValueAssigner(CCAssignFn *AssignFn_,

112 bool IsReturn)

113 : OutgoingValueAssigner(AssignFn_, AssignFnVarArg_),

114 Subtarget(Subtarget_), IsReturn(IsReturn) {}

115

116 bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,

119 CCState &State) override {

121 bool IsCalleeWin =

123 bool UseVarArgsCCForFixed = IsCalleeWin && State.isVarArg();

124

125 bool Res;

126 if (Info.IsFixed && !UseVarArgsCCForFixed) {

127 if (!IsReturn)

129 Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);

130 } else

131 Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, State);

132

134 return Res;

135 }

136};

137

140 : IncomingValueHandler(MIRBuilder, MRI) {}

141

145 auto &MFI = MIRBuilder.getMF().getFrameInfo();

146

147

148

149 const bool IsImmutable = Flags.isByVal();

150

151 int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable);

153 auto AddrReg = MIRBuilder.buildFrameIndex(LLT::pointer(0, 64), FI);

154 return AddrReg.getReg(0);

155 }

156

159

160

161 if (Flags.isPointer())

164 }

165

168 markRegUsed(PhysReg);

169 IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);

170 }

171

176

179

180

183 else {

184

185

187 LocTy = MemTy;

188 }

189

193

195 case CCValAssign::LocInfo::ZExt:

196 MIRBuilder.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, ValVReg, Addr, *MMO);

197 return;

198 case CCValAssign::LocInfo::SExt:

199 MIRBuilder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, ValVReg, Addr, *MMO);

200 return;

201 default:

202 MIRBuilder.buildLoad(ValVReg, Addr, *MMO);

203 return;

204 }

205 }

206

207

208

209

210 virtual void markRegUsed(Register Reg) = 0;

211};

212

216

217 void markRegUsed(Register Reg) override {

220 }

221};

222

223struct CallReturnHandler : public IncomingArgHandler {

226 : IncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {}

227

228 void markRegUsed(Register Reg) override {

230 }

231

233};

234

235

236struct ReturnedArgCallReturnHandler : public CallReturnHandler {

240 : CallReturnHandler(MIRBuilder, MRI, MIB) {}

241

242 void markRegUsed(Register Reg) override {}

243};

244

248 int FPDiff = 0)

249 : OutgoingValueHandler(MIRBuilder, MRI), MIB(MIB), IsTailCall(IsTailCall),

250 FPDiff(FPDiff),

251 Subtarget(MIRBuilder.getMF().getSubtarget<AArch64Subtarget>()) {}

252

259

260 if (IsTailCall) {

261 assert(Flags.isByVal() && "byval unhandled with tail calls");

262

265 auto FIReg = MIRBuilder.buildFrameIndex(p0, FI);

267 return FIReg.getReg(0);

268 }

269

271 SPReg = MIRBuilder.buildCopy(p0, Register(AArch64::SP)).getReg(0);

272

273 auto OffsetReg = MIRBuilder.buildConstant(s64, Offset);

274

275 auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

276

278 return AddrReg.getReg(0);

279 }

280

281

282

283

284

287 if (Flags.isPointer())

290 }

291

295 Register ExtReg = extendRegister(ValVReg, VA);

296 MIRBuilder.buildCopy(PhysReg, ExtReg);

297 }

298

305 MIRBuilder.buildStore(ValVReg, Addr, *MMO);

306 }

307

313

314

316 MaxSize = 0;

317

319 if (VA.getLocInfo() != CCValAssign::LocInfo::FPExt) {

322

326 }

327

328 ValVReg = extendRegister(ValVReg, VA, MaxSize);

329 } else {

330

332 }

333

334 assignValueToAddress(ValVReg, Addr, MemTy, MPO, VA);

335 }

336

338

339 bool IsTailCall;

340

341

342

343 int FPDiff;

344

345

347

349};

350}

351

355}

356

358 const Value *Val,

361 Register SwiftErrorVReg) const {

363 assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&

364 "Return value without a vreg");

365

367 if (!FLI.CanLowerReturn) {

369 } else if (!VRegs.empty()) {

373

377 auto &DL = F.getDataLayout();

379

383 "For each split Type there should be exactly one VReg.");

384

387

388 for (unsigned i = 0; i < SplitEVTs.size(); ++i) {

390 ArgInfo CurArgInfo = ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx), 0};

392

393

394

395 auto &Flags = CurArgInfo.Flags[0];

397 !Flags.isSExt() && !Flags.isZExt()) {

400 1) {

401

403 if (EVT(NewVT) != SplitEVTs[i]) {

404 unsigned ExtendOp = TargetOpcode::G_ANYEXT;

405 if (F.getAttributes().hasRetAttr(Attribute::SExt))

406 ExtendOp = TargetOpcode::G_SEXT;

407 else if (F.getAttributes().hasRetAttr(Attribute::ZExt))

408 ExtendOp = TargetOpcode::G_ZEXT;

409

410 LLT NewLLT(NewVT);

413

414

418 CurVReg =

421 } else {

422

423 CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})

425 }

428

429

430

431 CurVReg =

434 } else {

436 return false;

437 }

438 } else {

439

440

441

442 if (NewLLT != MRI.getType(CurVReg)) {

443

444 CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})

446 }

447 }

448 }

449 }

450 if (CurVReg != CurArgInfo.Regs[0]) {

451 CurArgInfo.Regs[0] = CurVReg;

452

454 }

456 }

457

458 AArch64OutgoingValueAssigner Assigner(AssignFn, AssignFn, Subtarget,

459 true);

460 OutgoingArgHandler Handler(MIRBuilder, MRI, MIB);

462 MIRBuilder, CC, F.isVarArg());

463 }

464

465 if (SwiftErrorVReg) {

467 MIRBuilder.buildCopy(AArch64::X21, SwiftErrorVReg);

468 }

469

472}

473

477 bool IsVarArg) const {

479 const auto &TLI = *getTLI();

480 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,

482

483 return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv));

484}

485

486

487

488

494

496 return;

497

500 assert(F.isVarArg() && "Expected F to be vararg?");

501

502

504 CCState CCInfo(F.getCallingConv(), true, MF, ArgLocs,

505 F.getContext());

507 RegParmTypes.push_back(MVT::i64);

508 RegParmTypes.push_back(MVT::f128);

509

510

514

515

516

518 Register X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass);

520 }

521

522

523 for (const auto &F : Forwards) {

526 }

527}

528

531 if (EnableSVEGISel && (F.getReturnType()->isScalableTy() ||

533 return A.getType()->isScalableTy();

534 })))

535 return true;

537 if (!ST.hasNEON() || !ST.hasFPARMv8()) {

538 LLVM_DEBUG(dbgs() << "Falling back to SDAG because we don't support no-NEON\n");

539 return true;

540 }

541

543 if (Attrs.hasZAState() || Attrs.hasZT0State() ||

544 Attrs.hasStreamingInterfaceOrBody() ||

545 Attrs.hasStreamingCompatibleInterface())

546 return true;

547

548 return false;

549}

550

551void AArch64CallLowering::saveVarArgRegisters(

556

562 bool IsWin64CC = Subtarget.isCallingConvWin64(CCInfo.getCallingConv(),

566

568 unsigned NumVariadicGPRArgRegs = GPRArgRegs.size() - FirstVariadicGPR + 1;

569

570 unsigned GPRSaveSize = 8 * (GPRArgRegs.size() - FirstVariadicGPR);

571 int GPRIdx = 0;

572 if (GPRSaveSize != 0) {

573 if (IsWin64CC) {

575 -static_cast<int>(GPRSaveSize), false);

576 if (GPRSaveSize & 15)

577

579 -static_cast<int>(alignTo(GPRSaveSize, 16)),

580 false);

581 } else

583

586 MIRBuilder.buildConstant(MRI.createGenericVirtualRegister(s64), 8);

587

588 for (unsigned i = FirstVariadicGPR; i < GPRArgRegs.size(); ++i) {

589 Register Val = MRI.createGenericVirtualRegister(s64);

595 MF, GPRIdx, (i - FirstVariadicGPR) * 8)

598

599 FIN = MIRBuilder.buildPtrAdd(MRI.createGenericVirtualRegister(p0),

600 FIN.getReg(0), Offset);

601 }

602 }

605

606 if (Subtarget.hasFPARMv8() && !IsWin64CC) {

608

609 unsigned FPRSaveSize = 16 * (FPRArgRegs.size() - FirstVariadicFPR);

610 int FPRIdx = 0;

611 if (FPRSaveSize != 0) {

613

616 MIRBuilder.buildConstant(MRI.createGenericVirtualRegister(s64), 16);

617

618 for (unsigned i = FirstVariadicFPR; i < FPRArgRegs.size(); ++i) {

625

628

629 FIN = MIRBuilder.buildPtrAdd(MRI.createGenericVirtualRegister(p0),

630 FIN.getReg(0), Offset);

631 }

632 }

635 }

636}

637

644 auto &DL = F.getDataLayout();

646

647

648

649 if (F.isVarArg() && Subtarget.isWindowsArm64EC())

650 return false;

651

652

653

656 return false;

657

658 bool IsWin64 =

659 Subtarget.isCallingConvWin64(F.getCallingConv(), F.isVarArg()) &&

660 !Subtarget.isWindowsArm64EC();

661

664

665

666

667 if (!FLI.CanLowerReturn)

669

670 unsigned i = 0;

671 for (auto &Arg : F.args()) {

672 if (DL.getTypeStoreSize(Arg.getType()).isZero())

673 continue;

674

675 ArgInfo OrigArg{VRegs[i], Arg, i};

677

678

679

680 if (OrigArg.Ty->isIntegerTy(1)) {

681 assert(OrigArg.Regs.size() == 1 &&

682 MRI.getType(OrigArg.Regs[0]).getSizeInBits() == 1 &&

683 "Unexpected registers used for i1 arg");

684

685 auto &Flags = OrigArg.Flags[0];

686 if (!Flags.isZExt() && !Flags.isSExt()) {

687

688 Register OrigReg = OrigArg.Regs[0];

690 OrigArg.Regs[0] = WideReg;

691 BoolArgs.push_back({OrigReg, WideReg});

692 }

693 }

694

695 if (Arg.hasAttribute(Attribute::SwiftAsync))

697

699 ++i;

700 }

701

704

707

708 AArch64IncomingValueAssigner Assigner(AssignFn, AssignFn);

711 CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

713 handleAssignments(Handler, SplitArgs, CCInfo, ArgLocs, MIRBuilder))

714 return false;

715

716 if (!BoolArgs.empty()) {

717 for (auto &KV : BoolArgs) {

719 Register WideReg = KV.second;

720 LLT WideTy = MRI.getType(WideReg);

721 assert(MRI.getType(OrigReg).getScalarSizeInBits() == 1 &&

722 "Unexpected bit size of a bool arg");

725 }

726 }

727

729 uint64_t StackSize = Assigner.StackSize;

730 if (F.isVarArg()) {

731 if ((!Subtarget.isTargetDarwin() && !Subtarget.isWindowsArm64EC()) || IsWin64) {

732

733

734

735

736

737 saveVarArgRegisters(MIRBuilder, Handler, CCInfo);

738 } else if (Subtarget.isWindowsArm64EC()) {

739 return false;

740 }

741

742

743 StackSize = alignTo(Assigner.StackSize, Subtarget.isTargetILP32() ? 4 : 8);

744

747 }

748

751

752

753 StackSize = alignTo(StackSize, 16);

754

755

756

758

759

760

761 }

762

763

764

765

766

768

769 if (Subtarget.hasCustomCallingConv())

770 Subtarget.getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);

771

773

774

776

777 return true;

778}

779

780

784}

785

786

788 switch (CC) {

797 return true;

798 default:

799 return false;

800 }

801}

802

803

804

805static std::pair<CCAssignFn *, CCAssignFn *>

808}

809

810bool AArch64CallLowering::doCallerAndCalleePassArgsTheSameWay(

816

817

818 if (CalleeCC == CallerCC)

819 return true;

820

821

825 std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =

827

830 std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =

832

833 AArch64IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed,

834 CalleeAssignFnVarArg);

835 AArch64IncomingValueAssigner CallerAssigner(CallerAssignFnFixed,

836 CallerAssignFnVarArg);

837

838 if (resultsCompatible(Info, MF, InArgs, CalleeAssigner, CallerAssigner))

839 return false;

840

841

843 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);

844 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);

846 TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved);

847 TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved);

848 }

849

850 return TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved);

851}

852

853bool AArch64CallLowering::areCalleeOutgoingArgsTailCallable(

856

857 if (OrigOutArgs.empty())

858 return true;

859

866

869 std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);

870

871

873 CCState OutInfo(CalleeCC, false, MF, OutLocs, Ctx);

874

875 AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,

876 Subtarget, false);

877

881 LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n");

882 return false;

883 }

884

885

888 LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");

889 return false;

890 }

891

892

893

894

896 const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);

898

899 if (Info.IsVarArg) {

900

901

902

903

904

905 for (unsigned i = 0; i < OutLocs.size(); ++i) {

906 auto &ArgLoc = OutLocs[i];

907 if (ArgLoc.isRegLoc())

908 continue;

909

912 << "... Cannot tail call vararg function with stack arguments\n");

913 return false;

914 }

915 }

916

918}

919

924

925

926 if (Info.IsTailCall)

927 return false;

928

932

933 LLVM_DEBUG(dbgs() << "Attempting to lower call as tail call\n");

934

935 if (Info.SwiftErrorVReg) {

936

937

938

939

940 LLVM_DEBUG(dbgs() << "... Cannot handle tail calls with swifterror yet.\n");

941 return false;

942 }

943

945 LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n");

946 return false;

947 }

948

949

950

951

952

953

954

955

956

957

958

959

960

961

962

963

964

965

967 return A.hasByValAttr() || A.hasInRegAttr() || A.hasSwiftErrorAttr();

968 })) {

969 LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval, "

970 "inreg, or swifterror arguments\n");

971 return false;

972 }

973

974

975

976

977

978

979

980

981 if (Info.Callee.isGlobal()) {

985 (!TT.isOSWindows() || TT.isOSBinFormatELF() ||

986 TT.isOSBinFormatMachO())) {

987 LLVM_DEBUG(dbgs() << "... Cannot tail call externally-defined function "

988 "with weak linkage for this OS.\n");

989 return false;

990 }

991 }

992

993

996

997

998

999

1000

1001

1003 "Unexpected variadic calling convention");

1004

1005

1006

1007 if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) {

1010 << "... Caller and callee have incompatible calling conventions.\n");

1011 return false;

1012 }

1013

1014 if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs))

1015 return false;

1016

1018 dbgs() << "... Call is eligible for tail call optimization.\n");

1019 return true;

1020}

1021

1023 bool IsTailCall,

1024 std::optionalCallLowering::PtrAuthInfo &PAI,

1027

1028 if (!IsTailCall) {

1029 if (!PAI)

1031

1032 assert(IsIndirect && "Direct call should not be authenticated");

1034 "Invalid auth call key");

1035 return AArch64::BLRA;

1036 }

1037

1038 if (!IsIndirect)

1039 return AArch64::TCRETURNdi;

1040

1041

1042

1045 assert(!PAI && "ptrauth tail-calls not yet supported with PAuthLR");

1046 return AArch64::TCRETURNrix17;

1047 }

1048 if (PAI)

1049 return AArch64::AUTH_TCRETURN_BTI;

1050 return AArch64::TCRETURNrix16x17;

1051 }

1052

1054 assert(!PAI && "ptrauth tail-calls not yet supported with PAuthLR");

1055 return AArch64::TCRETURNrinotx16;

1056 }

1057

1058 if (PAI)

1059 return AArch64::AUTH_TCRETURN;

1060 return AArch64::TCRETURNri;

1061}

1062

1068 if (!OutArgs.empty() && OutArgs[0].Flags[0].isReturned()) {

1069

1070 Mask = TRI.getThisReturnPreservedMask(MF, Info.CallConv);

1071 if (!Mask) {

1072 OutArgs[0].Flags[0].setReturned(false);

1073 Mask = TRI.getCallPreservedMask(MF, Info.CallConv);

1074 }

1075 } else {

1076 Mask = TRI.getCallPreservedMask(MF, Info.CallConv);

1077 }

1078 return Mask;

1079}

1080

1081bool AArch64CallLowering::lowerTailCall(

1089

1090

1094

1095

1099 std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);

1100

1102 if (!IsSibCall)

1103 CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);

1104

1108

1109

1112

1113

1114

1115 MIB.addImm(0);

1116

1117

1118 if (Opc == AArch64::AUTH_TCRETURN || Opc == AArch64::AUTH_TCRETURN_BTI) {

1121 "Invalid auth call key");

1122 MIB.addImm(Info.PAI->Key);

1123

1126 std::tie(IntDisc, AddrDisc) =

1128

1129 MIB.addImm(IntDisc);

1130 MIB.addUse(AddrDisc);

1131 if (AddrDisc != AArch64::NoRegister) {

1135 MIB->getOperand(4), 4));

1136 }

1137 }

1138

1139

1140 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);

1142 TRI->UpdateCustomCallPreservedMask(MF, &Mask);

1143 MIB.addRegMask(Mask);

1144

1145 if (Info.CFIType)

1146 MIB->setCFIType(MF, Info.CFIType->getZExtValue());

1147

1148 if (TRI->isAnyArgRegReserved(MF))

1149 TRI->emitReservedArgRegCallError(MF);

1150

1151

1152

1153

1154

1155

1156 int FPDiff = 0;

1157

1158

1159

1160

1161 unsigned NumBytes = 0;

1162 if (!IsSibCall) {

1163

1164

1165

1168 CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext());

1169

1170 AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,

1171 Subtarget, false);

1173 return false;

1174

1175

1176

1177 NumBytes = alignTo(OutInfo.getStackSize(), 16);

1178

1179

1180

1181

1182 FPDiff = NumReusableBytes - NumBytes;

1183

1184

1185

1186 if (FPDiff < 0 && FuncInfo->getTailCallReservedStack() < (unsigned)-FPDiff)

1188

1189

1190

1191

1192

1193

1194 assert(FPDiff % 16 == 0 && "unaligned stack on tail call");

1195 }

1196

1198

1199 AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,

1200 Subtarget, false);

1201

1202

1203 OutgoingArgHandler Handler(MIRBuilder, MRI, MIB,

1204 true, FPDiff);

1206 CalleeCC, Info.IsVarArg))

1207 return false;

1208

1210

1211 if (Info.IsVarArg && Info.IsMustTailCall) {

1212

1213

1214

1215 for (const auto &F : Forwards) {

1217

1218

1220 if (!Use.isReg())

1221 return false;

1222 return TRI->regsOverlap(Use.getReg(), ForwardedReg);

1223 }))

1224 continue;

1225

1226

1229 }

1230 }

1231

1232

1233

1234 if (!IsSibCall) {

1237

1238

1239

1240

1242 }

1243

1244

1246

1247

1248

1249 if (MIB->getOperand(0).isReg())

1252 MIB->getDesc(), MIB->getOperand(0), 0);

1253

1255 Info.LoweredTailCall = true;

1256 return true;

1257}

1258

1264 auto &DL = F.getDataLayout();

1267

1268

1269

1270

1271

1273 return false;

1274

1275

1276

1279 return false;

1280

1282 for (auto &OrigArg : Info.OrigArgs) {

1284

1285 auto &Flags = OrigArg.Flags[0];

1286 if (OrigArg.Ty->isIntegerTy(1) && !Flags.isSExt() && !Flags.isZExt()) {

1289 MRI.getType(OutArg.Regs[0]).getSizeInBits() == 1 &&

1290 "Unexpected registers used for i1 arg");

1291

1292

1293

1294 OutArg.Regs[0] =

1298 }

1299 }

1300

1302 if (Info.OrigRet.Ty->isVoidTy())

1304

1305

1306 bool CanTailCallOpt =

1308

1309

1310 if (Info.IsMustTailCall && !CanTailCallOpt) {

1311

1312

1313

1314 LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n");

1315 return false;

1316 }

1317

1318 Info.IsTailCall = CanTailCallOpt;

1319 if (CanTailCallOpt)

1320 return lowerTailCall(MIRBuilder, Info, OutArgs);

1321

1322

1325 std::tie(AssignFnFixed, AssignFnVarArg) =

1327

1329 CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);

1330

1331

1332

1333

1334 unsigned Opc = 0;

1335

1336

1337

1339 Opc = Info.PAI ? AArch64::BLRA_RVMARKER : AArch64::BLR_RVMARKER;

1340

1341

1342 else if (Info.CB && Info.CB->hasFnAttr(Attribute::ReturnsTwice) &&

1343 !Subtarget.noBTIAtReturnTwice() &&

1345 Opc = AArch64::BLR_BTI;

1346 else {

1347

1348

1349 if (Info.Callee.isSymbol() && F.getParent()->getRtLibUseGOT()) {

1350 auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_GLOBAL_VALUE);

1354 }

1356 }

1357

1359 unsigned CalleeOpNo = 0;

1360

1361 if (Opc == AArch64::BLR_RVMARKER || Opc == AArch64::BLRA_RVMARKER) {

1362

1363

1365 MIB.addGlobalAddress(ARCFn);

1366 ++CalleeOpNo;

1367 } else if (Info.CFIType) {

1368 MIB->setCFIType(MF, Info.CFIType->getZExtValue());

1369 }

1370

1371 MIB.add(Info.Callee);

1372

1373

1376

1377 AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,

1378 Subtarget, false);

1379

1380 OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, false);

1382 Info.CallConv, Info.IsVarArg))

1383 return false;

1384

1386

1387 if (Opc == AArch64::BLRA || Opc == AArch64::BLRA_RVMARKER) {

1390 "Invalid auth call key");

1391 MIB.addImm(Info.PAI->Key);

1392

1395 std::tie(IntDisc, AddrDisc) =

1397

1398 MIB.addImm(IntDisc);

1399 MIB.addUse(AddrDisc);

1400 if (AddrDisc != AArch64::NoRegister) {

1403 MIB->getDesc(), MIB->getOperand(CalleeOpNo + 3),

1404 CalleeOpNo + 3);

1405 }

1406 }

1407

1408

1410 TRI->UpdateCustomCallPreservedMask(MF, &Mask);

1411 MIB.addRegMask(Mask);

1412

1413 if (TRI->isAnyArgRegReserved(MF))

1414 TRI->emitReservedArgRegCallError(MF);

1415

1416

1418

1422 ? alignTo(Assigner.StackSize, 16)

1423 : 0;

1424

1425 CallSeqStart.addImm(Assigner.StackSize).addImm(0);

1426 MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP)

1427 .addImm(Assigner.StackSize)

1428 .addImm(CalleePopBytes);

1429

1430

1431

1432

1433 if (MIB->getOperand(CalleeOpNo).isReg())

1436 MIB->getOperand(CalleeOpNo), CalleeOpNo);

1437

1438

1439

1440

1441 if (Info.CanLowerReturn && Info.OrigRet.Ty->isVoidTy()) {

1443 CallReturnHandler Handler(MIRBuilder, MRI, MIB);

1444 bool UsingReturnedArg =

1445 !OutArgs.empty() && OutArgs[0].Flags[0].isReturned();

1446

1447 AArch64OutgoingValueAssigner Assigner(RetAssignFn, RetAssignFn, Subtarget,

1448 false);

1449 ReturnedArgCallReturnHandler ReturnedArgHandler(MIRBuilder, MRI, MIB);

1451 UsingReturnedArg ? ReturnedArgHandler : Handler, Assigner, InArgs,

1452 MIRBuilder, Info.CallConv, Info.IsVarArg,

1453 UsingReturnedArg ? ArrayRef(OutArgs[0].Regs)

1455 return false;

1456 }

1457

1458 if (Info.SwiftErrorVReg) {

1461 }

1462

1463 if (Info.CanLowerReturn) {

1465 Info.DemoteRegister, Info.DemoteStackIndex);

1466 }

1467 return true;

1468}

1469

1472}

unsigned const MachineRegisterInfo * MRI

static void handleMustTailForwardedRegisters(MachineIRBuilder &MIRBuilder, CCAssignFn *AssignFn)

Helper function to compute forwarded registers for musttail calls.

cl::opt< bool > EnableSVEGISel

static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect, bool IsTailCall, std::optional< CallLowering::PtrAuthInfo > &PAI, MachineRegisterInfo &MRI)

static LLT getStackValueStoreTypeHack(const CCValAssign &VA)

static const uint32_t * getMaskForArgs(SmallVectorImpl< AArch64CallLowering::ArgInfo > &OutArgs, AArch64CallLowering::CallLoweringInfo &Info, const AArch64RegisterInfo &TRI, MachineFunction &MF)

static void applyStackPassedSmallTypeDAGHack(EVT OrigVT, MVT &ValVT, MVT &LocVT)

static std::pair< CCAssignFn *, CCAssignFn * > getAssignFnsForCC(CallingConv::ID CC, const AArch64TargetLowering &TLI)

Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn for CC.

static bool doesCalleeRestoreStack(CallingConv::ID CallConv, bool TailCallOpt)

This file describes how to lower LLVM calls to machine code calls.

static std::tuple< SDValue, SDValue > extractPtrauthBlendDiscriminators(SDValue Disc, SelectionDAG *DAG)

static const MCPhysReg GPRArgRegs[]

static const MCPhysReg FPRArgRegs[]

cl::opt< bool > EnableSVEGISel("aarch64-enable-gisel-sve", cl::Hidden, cl::desc("Enable / disable SVE scalable vectors in Global ISel"), cl::init(false))

static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls)

Return true if the calling convention is one that we can guarantee TCO for.

static bool mayTailCallThisCC(CallingConv::ID CC)

Return true if we might ever do TCO for calls with this calling convention.

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

This file contains the simple types necessary to represent the attributes associated with functions a...

static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")

Analysis containing CSE Info

Implement a low-level type suitable for MachineInstr level instruction selection.

This file declares the MachineIRBuilder class.

unsigned const TargetRegisterInfo * TRI

static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)

This file defines ARC utility functions which are used by various parts of the compiler.

static constexpr Register SPReg

assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())

This file defines the SmallVector class.

bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, ArrayRef< Register > VRegs, FunctionLoweringInfo &FLI, Register SwiftErrorVReg) const override

This hook must be implemented to lower outgoing return values, described by Val, into the specified v...

bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv, SmallVectorImpl< BaseArgInfo > &Outs, bool IsVarArg) const override

This hook must be implemented to check whether the return values described by Outs can fit into the r...

bool fallBackToDAGISel(const MachineFunction &MF) const override

bool isTypeIsValidForThisReturn(EVT Ty) const override

For targets which support the "returned" parameter attribute, returns true if the given type is a val...

bool isEligibleForTailCallOptimization(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info, SmallVectorImpl< ArgInfo > &InArgs, SmallVectorImpl< ArgInfo > &OutArgs) const

Returns true if the call can be lowered as a tail call.

AArch64CallLowering(const AArch64TargetLowering &TLI)

bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const override

This hook must be implemented to lower the given call instruction, including argument and return valu...

bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, ArrayRef< ArrayRef< Register > > VRegs, FunctionLoweringInfo &FLI) const override

This hook must be implemented to lower the incoming (formal) arguments, described by VRegs,...

AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...

bool branchTargetEnforcement() const

void setVarArgsStackIndex(int Index)

void setTailCallReservedStack(unsigned bytes)

SmallVectorImpl< ForwardedRegister > & getForwardedMustTailRegParms()

void setBytesInStackArgArea(unsigned bytes)

void setVarArgsGPRIndex(int Index)

bool branchProtectionPAuthLR() const

void setVarArgsFPRSize(unsigned Size)

unsigned getBytesInStackArgArea() const

void setVarArgsFPRIndex(int Index)

void setVarArgsGPRSize(unsigned Size)

void setArgumentStackToRestore(unsigned bytes)

const AArch64RegisterInfo * getRegisterInfo() const override

const AArch64InstrInfo * getInstrInfo() const override

bool isWindowsArm64EC() const

bool isCallingConvWin64(CallingConv::ID CC, bool IsVarArg) const

const RegisterBankInfo * getRegBankInfo() const override

bool hasCustomCallingConv() const

MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override

Certain combinations of ABIs, Targets and features require that types are legal for some operations a...

unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override

Certain targets require unusual breakdowns of certain types.

CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC) const

Selects the correct CCAssignFn for a given CallingConvention value.

CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const

Selects the correct CCAssignFn for a given CallingConvention value.

This class represents an incoming formal argument to a Function.

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...

size_t size() const

size - Get the array size.

bool empty() const

empty - Check if the array is empty.

CCState - This class holds information needed while lowering arguments and return values.

MachineFunction & getMachineFunction() const

unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const

getFirstUnallocated - Return the index of the first unallocated register in the set,...

void analyzeMustTailForwardedRegisters(SmallVectorImpl< ForwardedRegister > &Forwards, ArrayRef< MVT > RegParmTypes, CCAssignFn Fn)

Compute the set of registers that need to be preserved and forwarded to any musttail calls.

CallingConv::ID getCallingConv() const

uint64_t getStackSize() const

Returns the size of the currently allocated portion of the stack.

bool isAllocated(MCRegister Reg) const

isAllocated - Return true if the specified register (or an alias) is allocated.

CCValAssign - Represent assignment of one arg/retval to a location.

LocInfo getLocInfo() const

static CCValAssign getReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP, bool IsCustom=false)

void insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy, ArrayRef< Register > VRegs, Register DemoteReg, int FI) const

Load the returned value from the stack into virtual registers in VRegs.

bool handleAssignments(ValueHandler &Handler, SmallVectorImpl< ArgInfo > &Args, CCState &CCState, SmallVectorImpl< CCValAssign > &ArgLocs, MachineIRBuilder &MIRBuilder, ArrayRef< Register > ThisReturnRegs={}) const

Use Handler to insert code to handle the argument/return values represented by Args.

bool resultsCompatible(CallLoweringInfo &Info, MachineFunction &MF, SmallVectorImpl< ArgInfo > &InArgs, ValueAssigner &CalleeAssigner, ValueAssigner &CallerAssigner) const

void splitToValueTypes(const ArgInfo &OrigArgInfo, SmallVectorImpl< ArgInfo > &SplitArgs, const DataLayout &DL, CallingConv::ID CallConv, SmallVectorImpl< uint64_t > *Offsets=nullptr) const

Break OrigArgInfo into one or more pieces the calling convention can process, returned in SplitArgs.

void insertSRetIncomingArgument(const Function &F, SmallVectorImpl< ArgInfo > &SplitArgs, Register &DemoteReg, MachineRegisterInfo &MRI, const DataLayout &DL) const

Insert the hidden sret ArgInfo to the beginning of SplitArgs.

bool determineAndHandleAssignments(ValueHandler &Handler, ValueAssigner &Assigner, SmallVectorImpl< ArgInfo > &Args, MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv, bool IsVarArg, ArrayRef< Register > ThisReturnRegs={}) const

Invoke ValueAssigner::assignArg on each of the given Args and then use Handler to move them to the as...

void insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy, ArrayRef< Register > VRegs, Register DemoteReg) const

Store the return value given by VRegs into stack starting at the offset specified in DemoteReg.

bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< ArgInfo > &OutVals) const

Check whether parameters to a call that are passed in callee saved registers are the same as from the...

bool determineAssignments(ValueAssigner &Assigner, SmallVectorImpl< ArgInfo > &Args, CCState &CCInfo) const

Analyze the argument list in Args, using Assigner to populate CCInfo.

bool checkReturn(CCState &CCInfo, SmallVectorImpl< BaseArgInfo > &Outs, CCAssignFn *Fn) const

void setArgFlags(ArgInfo &Arg, unsigned OpIdx, const DataLayout &DL, const FuncInfoTy &FuncInfo) const

A parsed version of the target data layout string in and methods for querying it.

void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const

FunctionLoweringInfo - This contains information that is global to a function that is used when lower...

iterator_range< arg_iterator > args()

CallingConv::ID getCallingConv() const

getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...

LLVMContext & getContext() const

getContext - Return a reference to the LLVMContext associated with this function.

bool isVarArg() const

isVarArg - Return true if this function takes a variable number of arguments.

bool hasExternalWeakLinkage() const

static constexpr LLT scalar(unsigned SizeInBits)

Get a low-level scalar or aggregate "bag of bits".

constexpr uint16_t getNumElements() const

Returns the number of elements in a vector LLT.

constexpr bool isVector() const

static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)

Get a low-level pointer in the given address space.

constexpr TypeSize getSizeInBits() const

Returns the total size of the type. Must only be called on sized types.

constexpr TypeSize getSizeInBytes() const

Returns the total size of the type in bytes, i.e.

This is an important class for using LLVM in a threaded context.

bool isVector() const

Return true if this is a vector value type.

void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())

Adds the specified register as a live in.

The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.

int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)

Create a new object at a fixed location on the stack.

int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)

Create a new statically sized stack object, returning a nonnegative identifier to represent it.

void setHasTailCall(bool V=true)

bool hasMustTailInVarArgFunc() const

Returns true if the function is variadic and contains a musttail call.

const TargetSubtargetInfo & getSubtarget() const

getSubtarget - Return the subtarget for which this machine code is being compiled.

MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)

getMachineMemOperand - Allocate a new MachineMemOperand.

MachineFrameInfo & getFrameInfo()

getFrameInfo - Return the frame info object for the current function.

MachineRegisterInfo & getRegInfo()

getRegInfo - Return information about the registers currently in use.

Function & getFunction()

Return the LLVM function that this machine code represents.

Ty * getInfo()

getInfo - Keep track of various per-function pieces of information for backends that would like to do...

Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)

addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...

const TargetMachine & getTarget() const

getTarget - Return the target machine this machine code is compiled with

Helper class to build MachineInstr.

MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)

Insert an existing instruction at the insertion point.

MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_ZEXT Op.

void setInstr(MachineInstr &MI)

Set the insertion point to before MI.

MachineInstrBuilder buildAssertZExt(const DstOp &Res, const SrcOp &Op, unsigned Size)

Build and insert Res = G_ASSERT_ZEXT Op, Size.

MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_PTR_ADD Op0, Op1.

MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)

Build and insert G_STORE Val, Addr, MMO.

MachineInstrBuilder buildInstr(unsigned Opcode)

Build and insert `<empty> = Opcode <empty>`.

MachineInstrBuilder buildPadVectorWithUndefElements(const DstOp &Res, const SrcOp &Op0)

Build and insert a, b, ..., x = G_UNMERGE_VALUES Op0 Res = G_BUILD_VECTOR a, b, .....

MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)

Build and insert Res = G_FRAME_INDEX Idx.

MachineFunction & getMF()

Getter for the function we currently build.

MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)

Build and insert Res = G_TRUNC Op.

const MachineBasicBlock & getMBB() const

Getter for the basic block we currently build.

void setMBB(MachineBasicBlock &MBB)

Set the insertion point to the end of MBB.

MachineRegisterInfo * getMRI()

Getter for MRI.

MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)

Build but don't insert `<empty> = Opcode <empty>`.

MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)

Build and insert Res = COPY Op.

virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)

Build and insert Res = G_CONSTANT Val.

Register getReg(unsigned Idx) const

Get the register for the operand index.

const MachineInstrBuilder & addImm(int64_t Val) const

Add a new immediate operand.

const MachineInstrBuilder & add(const MachineOperand &MO) const

const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const

Add a new virtual register operand.

const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const

Add a virtual register definition operand.

const MachineOperand & getOperand(unsigned i) const

@ MOLoad

The memory access reads data.

@ MOInvariant

The memory access always returns the same value (or traps).

@ MOStore

The memory access writes data.

MachineOperand class - Representation of each machine instruction operand.

void setImm(int64_t immVal)

static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)

MachineRegisterInfo - Keep track of information for virtual and physical registers,...

void addLiveIn(MCRegister Reg, Register vreg=Register())

addLiveIn - Add the specified register as a live-in.

Wrapper class representing virtual and physical registers.

SMEAttrs is a utility class to parse the SME ACLE attributes on functions.

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

const Triple & getTargetTriple() const

unsigned GuaranteedTailCallOpt

GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline.

virtual const RegisterBankInfo * getRegBankInfo() const

If the information for the register banks is available, return it.

virtual const TargetInstrInfo * getInstrInfo() const

Triple - Helper class for working with autoconf configuration names.

static constexpr TypeSize getFixed(ScalarTy ExactSize)

LLVMContext & getContext() const

Return the LLVMContext in which this type was uniqued.

static IntegerType * getInt8Ty(LLVMContext &C)

A Use represents the edge between a Value definition and its users.

unsigned getNumOperands() const

LLVM Value Representation.

Type * getType() const

All values are typed, get the type of this value.

@ MO_GOT

MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the sy...

ArrayRef< MCPhysReg > getFPRArgRegs()

ArrayRef< MCPhysReg > getGPRArgRegs()

constexpr std::underlying_type_t< E > Mask()

Get a bitmask with 1s in all places up to the high-order bit of E's largest value.

@ ARM64EC_Thunk_Native

Calling convention used in the ARM64EC ABI to implement calls between ARM64 code and thunks.

@ Swift

Calling convention for Swift.

@ PreserveMost

Used for runtime calls that preserves most registers.

@ PreserveAll

Used for runtime calls that preserves (almost) all registers.

@ Fast

Attempts to make calls as fast as possible (e.g.

@ PreserveNone

Used for runtime calls that preserves none general registers.

@ Tail

Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always be performed.

@ SwiftTail

This follows the Swift calling convention in how arguments are passed but guarantees tail calls will ...

@ ARM64EC_Thunk_X64

Calling convention used in the ARM64EC ABI to implement calls between x64 code and thunks.

@ C

The default llvm calling convention, compatible with C.

unsigned ID

LLVM IR allows to use arbitrary numbers as calling convention identifiers.

@ Implicit

Not emitted register (e.g. carry, or temporary result).

Reg

All possible values of the reg field in the ModR/M byte.

std::optional< Function * > getAttachedARCFunction(const CallBase *CB)

This function returns operand bundle clang_arc_attachedcall's argument, which is the address of the A...

bool hasAttachedCallOpBundle(const CallBase *CB)

This is an optimization pass for GlobalISel generic memory operations.

Register constrainOperandRegClass(const MachineFunction &MF, const TargetRegisterInfo &TRI, MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, MachineOperand &RegMO)

Constrain the Register operand OpIdx, so that it is now constrained to the TargetRegisterClass passed...

void append_range(Container &C, Range &&R)

Wrapper function to append range R to container C.

unsigned getBLRCallOpcode(const MachineFunction &MF)

Return opcode to be used for indirect calls.

bool any_of(R &&range, UnaryPredicate P)

Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.

raw_ostream & dbgs()

dbgs() - This returns a reference to a raw_ostream for debugging messages.

bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)

CCAssignFn - This function assigns a location for Val, updating State to reflect the change.

uint64_t alignTo(uint64_t Size, Align A)

Returns a multiple of A needed to store Size bytes.

void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())

ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...

LLT getLLTForType(Type &Ty, const DataLayout &DL)

Construct a low-level type based on an LLVM type.

Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO)

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

This struct is a compact representation of a valid (non-zero power of two) alignment.

SmallVector< Register, 4 > Regs

SmallVector< ISD::ArgFlagsTy, 4 > Flags

Base class for ValueHandlers used for arguments coming into the current function, or for return value...

void assignValueToReg(Register ValVReg, Register PhysReg, const CCValAssign &VA) override

Provides a default implementation for argument handling.

Base class for ValueHandlers used for arguments passed to a function call, or for return values.

MachineIRBuilder & MIRBuilder

MachineRegisterInfo & MRI

virtual LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const

Return the in-memory size to write for the argument at VA.

TypeSize getSizeInBits() const

Return the size of the specified value type in bits.

Type * getTypeForEVT(LLVMContext &Context) const

This method returns an LLVM type corresponding to the specified EVT.

Describes a register that needs to be forwarded from the prologue to a musttail call.

This class contains a discriminated union of information about pointers in memory operands,...

static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)

Stack pointer relative access.

static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)

Return a MachinePointerInfo record that refers to the specified FrameIndex.