//===--- AArch64CallLowering.cpp - Call lowering --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AArch64CallLowering.h"
#include "AArch64ISelLowering.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
// NOTE: the remaining llvm/ADT, llvm/CodeGen, llvm/CodeGen/GlobalISel, and
// llvm/IR headers included by the original file were lost in extraction and
// are not reproduced here.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>

#define DEBUG_TYPE "aarch64-call-lowering"

using namespace llvm;

extern cl::opt<bool> EnableSVEGISel;

AArch64CallLowering::AArch64CallLowering(const AArch64TargetLowering &TLI)
    : CallLowering(&TLI) {}

static void applyStackPassedSmallTypeDAGHack(EVT OrigVT, MVT &ValVT,
                                             MVT &LocVT) {
  // If ValVT is i1/i8/i16, we should set LocVT to i8/i8/i16. This is a legacy
  // hack because the DAG calls the assignment function with pre-legalized
  // register typed values, not the raw type.
  //
  // This hack is not applied to return values, which are not passed on the
  // stack.
  if (OrigVT == MVT::i1 || OrigVT == MVT::i8)
    ValVT = LocVT = MVT::i8;
  else if (OrigVT == MVT::i16)
    ValVT = LocVT = MVT::i16;
}
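
// For example, an i1 or i8 argument passed on the stack occupies a full i8
// slot under this hack, matching the loads and stores the SelectionDAG
// lowering emits for the same argument.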

// Account for the i1/i8/i16 stack passed value hack.
static LLT getStackValueStoreTypeHack(const CCValAssign &VA) {
  const MVT ValVT = VA.getValVT();
  return (ValVT == MVT::i8 || ValVT == MVT::i16) ? LLT(ValVT)
                                                 : LLT(VA.getLocVT());
}

namespace {

struct AArch64IncomingValueAssigner
    : public CallLowering::IncomingValueAssigner {
  AArch64IncomingValueAssigner(CCAssignFn *AssignFn_,
                               CCAssignFn *AssignFnVarArg_)
      : IncomingValueAssigner(AssignFn_, AssignFnVarArg_) {}

  bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    applyStackPassedSmallTypeDAGHack(OrigVT, ValVT, LocVT);
    return IncomingValueAssigner::assignArg(ValNo, OrigVT, ValVT, LocVT,
                                            LocInfo, Info, Flags, State);
  }
};

struct AArch64OutgoingValueAssigner
    : public CallLowering::OutgoingValueAssigner {
  const AArch64Subtarget &Subtarget;

  /// Track if this is used for a return instead of function argument
  /// passing. We apply a hack to i1/i8/i16 stack passed values, but do not
  /// use stack passed returns for them and cannot apply the type adjustment.
  bool IsReturn;

  AArch64OutgoingValueAssigner(CCAssignFn *AssignFn_,
                               CCAssignFn *AssignFnVarArg_,
                               const AArch64Subtarget &Subtarget_,
                               bool IsReturn)
      : OutgoingValueAssigner(AssignFn_, AssignFnVarArg_),
        Subtarget(Subtarget_), IsReturn(IsReturn) {}

  bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    const Function &F = State.getMachineFunction().getFunction();
    bool IsCalleeWin =
        Subtarget.isCallingConvWin64(State.getCallingConv(), F.isVarArg());
    bool UseVarArgsCCForFixed = IsCalleeWin && State.isVarArg();

    bool Res;
    if (!Flags.isVarArg() && !UseVarArgsCCForFixed) {
      if (!IsReturn)
        applyStackPassedSmallTypeDAGHack(OrigVT, ValVT, LocVT);
      Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, Info.Ty, State);
    } else
      Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, Info.Ty, State);

    StackSize = State.getStackSize();
    return Res;
  }
};
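
// The fixed/vararg CCAssignFn split matters mostly on Darwin, where variadic
// arguments are passed on the stack rather than in registers; on AAPCS
// targets the two conventions coincide, and on Win64 the vararg convention is
// additionally applied to the *fixed* arguments of variadic functions
// (UseVarArgsCCForFixed above).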

struct IncomingArgHandler : public CallLowering::IncomingValueHandler {
  IncomingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : IncomingValueHandler(MIRBuilder, MRI) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();

    // Byval is assumed to be writable memory, but other stack passed
    // arguments are not.
    const bool IsImmutable = !Flags.isByVal();

    int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    auto AddrReg = MIRBuilder.buildFrameIndex(LLT::pointer(0, 64), FI);
    return AddrReg.getReg(0);
  }

  LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const override {
    // For pointers, we just need to fixup the integer types reported in the
    // CCValAssign.
    if (Flags.isPointer())
      return CallLowering::ValueHandler::getStackValueStoreType(DL, VA, Flags);
    return getStackValueStoreTypeHack(VA);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        const CCValAssign &VA) override {
    markRegUsed(PhysReg);
    IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();

    LLT ValTy(VA.getValVT());
    LLT LocTy(VA.getLocVT());

    // Fixup the types for the DAG compatibility hack.
    if (VA.getValVT() == MVT::i8 || VA.getValVT() == MVT::i16)
      std::swap(ValTy, LocTy);
    else {
      // The calling code knows if this is a pointer or not, we're only
      // touching the LocVT.
      LocTy = MemTy;
    }

    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, LocTy,
        inferAlignFromPtrInfo(MF, MPO));

    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::ZExt:
      MIRBuilder.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, ValVReg, Addr, *MMO);
      return;
    case CCValAssign::LocInfo::SExt:
      MIRBuilder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, ValVReg, Addr, *MMO);
      return;
    default:
      MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
      return;
    }
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in), and a call instruction
  /// (it's an implicit-def of the BL).
  virtual void markRegUsed(Register Reg) = 0;
};

struct FormalArgHandler : public IncomingArgHandler {
  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : IncomingArgHandler(MIRBuilder, MRI) {}

  void markRegUsed(Register Reg) override {
    MIRBuilder.getMRI()->addLiveIn(Reg.asMCReg());
    MIRBuilder.getMBB().addLiveIn(Reg.asMCReg());
  }
};

struct CallReturnHandler : public IncomingArgHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder MIB)
      : IncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {}

  void markRegUsed(Register Reg) override {
    MIB.addDef(Reg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};

/// A special return arg handler for "returned" attribute arg calls.
struct ReturnedArgCallReturnHandler : public CallReturnHandler {
  ReturnedArgCallReturnHandler(MachineIRBuilder &MIRBuilder,
                               MachineRegisterInfo &MRI,
                               MachineInstrBuilder MIB)
      : CallReturnHandler(MIRBuilder, MRI, MIB) {}

  void markRegUsed(Register Reg) override {}
};

struct OutgoingArgHandler : public CallLowering::OutgoingValueHandler {
  OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                     MachineInstrBuilder MIB, bool IsTailCall = false,
                     int FPDiff = 0)
      : OutgoingValueHandler(MIRBuilder, MRI), MIB(MIB), IsTailCall(IsTailCall),
        FPDiff(FPDiff),
        Subtarget(MIRBuilder.getMF().getSubtarget<AArch64Subtarget>()) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    MachineFunction &MF = MIRBuilder.getMF();
    LLT p0 = LLT::pointer(0, 64);
    LLT s64 = LLT::scalar(64);

    if (IsTailCall) {
      assert(!Flags.isByVal() && "byval unhandled with tail calls");

      Offset += FPDiff;
      int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
      auto FIReg = MIRBuilder.buildFrameIndex(p0, FI);
      MPO = MachinePointerInfo::getFixedStack(MF, FI);
      return FIReg.getReg(0);
    }

    if (!SPReg)
      SPReg = MIRBuilder.buildCopy(p0, Register(AArch64::SP)).getReg(0);

    auto OffsetReg = MIRBuilder.buildConstant(s64, Offset);

    auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

    MPO = MachinePointerInfo::getStack(MF, Offset);
    return AddrReg.getReg(0);
  }

  /// We need to fixup the reported store size for certain value types because
  /// we invert the interpretation of ValVT and LocVT in certain cases. This
  /// is for compatibility with the DAG call lowering implementation, which we
  /// are currently building on top of.
  LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const override {
    if (Flags.isPointer())
      return CallLowering::ValueHandler::getStackValueStoreType(DL, VA, Flags);
    return getStackValueStoreTypeHack(VA);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        const CCValAssign &VA) override {
    MIB.addUse(PhysReg, RegState::Implicit);
    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
  }

  /// Check whether a stack argument requires lowering in a tail call.
  static bool shouldLowerTailCallStackArg(const MachineFunction &MF,
                                          const CCValAssign &VA,
                                          Register ValVReg,
                                          Register StoreAddr) {
    const MachineRegisterInfo &MRI = MF.getRegInfo();
    // Find the defining instruction for the argument value.
    auto *DefMI = MRI.getVRegDef(ValVReg);
    assert(DefMI && "No defining instruction");
    for (;;) {
      // Look through nodes that don't alter the bits of the incoming value.
      unsigned Op = DefMI->getOpcode();
      if (Op == TargetOpcode::G_ZEXT || Op == TargetOpcode::G_ANYEXT ||
          Op == TargetOpcode::G_BITCAST || isAssertMI(*DefMI)) {
        DefMI = MRI.getVRegDef(DefMI->getOperand(1).getReg());
        continue;
      }
      break;
    }

    auto *Load = dyn_cast<GLoad>(DefMI);
    if (!Load)
      return true;
    Register LoadReg = Load->getPointerReg();
    auto *LoadAddrDef = MRI.getVRegDef(LoadReg);
    if (LoadAddrDef->getOpcode() != TargetOpcode::G_FRAME_INDEX)
      return true;
    const MachineFrameInfo &MFI = MF.getFrameInfo();
    int LoadFI = LoadAddrDef->getOperand(1).getIndex();

    auto *StoreAddrDef = MRI.getVRegDef(StoreAddr);
    if (StoreAddrDef->getOpcode() != TargetOpcode::G_FRAME_INDEX)
      return true;
    int StoreFI = StoreAddrDef->getOperand(1).getIndex();

    if (!MFI.isImmutableObjectIndex(LoadFI))
      return true;
    if (MFI.getObjectOffset(LoadFI) != MFI.getObjectOffset(StoreFI))
      return true;
    if (Load->getMemSize() != MFI.getObjectSize(StoreFI))
      return true;

    // The store would write the loaded value back to its own slot; the
    // argument does not need to be lowered.
    return false;
  }
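
  // In other words: if the outgoing stack argument was itself loaded from the
  // caller's own immutable incoming-argument slot, at the same offset and
  // with the same size as the slot it is about to be stored to, the value is
  // already in place and the store can be skipped. This commonly fires when a
  // function forwards its own stack arguments unchanged to a tail callee.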

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    if (IsTailCall && !shouldLowerTailCallStackArg(MF, VA, ValVReg, Addr))
      return;
    auto MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, MemTy,
                                       inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildStore(ValVReg, Addr, *MMO);
  }

  void assignValueToAddress(const CallLowering::ArgInfo &Arg, unsigned RegIndex,
                            Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    unsigned MaxSize = MemTy.getSizeInBytes() * 8;
    // For varargs, we always want to extend them to 8 bytes, in which case
    // we disable setting a max.
    if (Arg.Flags[0].isVarArg())
      MaxSize = 0;

    Register ValVReg = Arg.Regs[RegIndex];
    if (VA.getLocInfo() != CCValAssign::LocInfo::FPExt) {
      MVT LocVT = VA.getLocVT();
      MemTy = LLT(VA.getValVT());

      if (VA.getLocInfo() == CCValAssign::LocInfo::ZExt ||
          VA.getLocInfo() == CCValAssign::LocInfo::SExt) {
        MemTy = LLT(LocVT);
      }

      ValVReg = extendRegister(ValVReg, VA, MaxSize);
    } else {
      // The store does not cover the full allocated stack slot.
      MemTy = LLT(VA.getValVT());
    }

    assignValueToAddress(ValVReg, Addr, MemTy, MPO, VA);
  }

  MachineInstrBuilder MIB;

  bool IsTailCall;

  /// For tail calls, the byte offset of the call's argument area from the
  /// callee's. Unused elsewhere.
  int FPDiff;

  // Cache the SP register vreg if we need it more than once in this call
  // site.
  Register SPReg;

  const AArch64Subtarget &Subtarget;
};
} // namespace

static bool doesCalleeRestoreStack(CallingConv::ID CallConv, bool TailCallOpt) {
  return (CallConv == CallingConv::Fast && TailCallOpt) ||
         CallConv == CallingConv::Tail || CallConv == CallingConv::SwiftTail;
}

bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                      const Value *Val,
                                      ArrayRef<Register> VRegs,
                                      FunctionLoweringInfo &FLI,
                                      Register SwiftErrorVReg) const {
  auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
  assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
         "Return value without a vreg");

  bool Success = true;
  if (!FLI.CanLowerReturn) {
    insertSRetStores(MIRBuilder, Val->getType(), VRegs, FLI.DemoteRegister);
  } else if (!VRegs.empty()) {

    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();
    const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();

    MachineRegisterInfo &MRI = MF.getRegInfo();
    const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
    CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
    auto &DL = F.getDataLayout();
    LLVMContext &Ctx = Val->getType()->getContext();

    SmallVector<EVT, 4> SplitEVTs;
    ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
    assert(VRegs.size() == SplitEVTs.size() &&
           "For each split Type there should be exactly one VReg.");

    SmallVector<ArgInfo, 8> SplitArgs;
    CallingConv::ID CC = F.getCallingConv();

    for (unsigned i = 0; i < SplitEVTs.size(); ++i) {

      Register CurVReg = VRegs[i];
      ArgInfo CurArgInfo = ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx), 0};
      setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);

      // i1 is a special case because SDAG i1 true is naturally zero extended
      // when widened using ANYEXT. We need to do it explicitly here.
      auto &Flags = CurArgInfo.Flags[0];
      if (MRI.getType(CurVReg).getSizeInBits() == 1 &&
          !Flags.isSExt() && !Flags.isZExt()) {
        CurVReg = MIRBuilder.buildZExt(LLT::scalar(8), CurVReg).getReg(0);
      } else if (TLI.getNumRegistersForCallingConv(Ctx, CC, SplitEVTs[i]) ==
                 1) {
        // Some types will need extending as specified by the CC.
        MVT NewVT = TLI.getRegisterTypeForCallingConv(Ctx, CC, SplitEVTs[i]);

        if (EVT(NewVT) != SplitEVTs[i]) {
          unsigned ExtendOp = TargetOpcode::G_ANYEXT;
          if (F.getAttributes().hasRetAttr(Attribute::SExt))
            ExtendOp = TargetOpcode::G_SEXT;
          else if (F.getAttributes().hasRetAttr(Attribute::ZExt))
            ExtendOp = TargetOpcode::G_ZEXT;

          LLT NewLLT(NewVT);
          LLT OldLLT = getLLTForType(*CurArgInfo.Ty, DL);
          CurArgInfo.Ty = EVT(NewVT).getTypeForEVT(Ctx);

          // Instead of an extend, we might have a vector type which needs
          // padding with more elements, e.g. <2 x half> -> <4 x half>.
          if (NewVT.isVector()) {
            if (OldLLT.isVector()) {
              if (NewLLT.getNumElements() > OldLLT.getNumElements()) {
                CurVReg =
                    MIRBuilder.buildPadVectorWithUndefElements(NewLLT, CurVReg)
                        .getReg(0);
              } else {
                // Just do a vector extend.
                CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
                              .getReg(0);
              }
            } else if (NewLLT.getNumElements() >= 2 &&
                       NewLLT.getNumElements() <= 8) {
              // We need to pad a <1 x S> type to <2/4/8 x S>. Since we don't
              // have <1 x S> vector types in GISel we use a build_vector
              // instead of a vector merge/concat.
              CurVReg =
                  MIRBuilder.buildPadVectorWithUndefElements(NewLLT, CurVReg)
                      .getReg(0);
            } else {
              LLVM_DEBUG(dbgs() << "Could not handle ret ty\n");
              return false;
            }
          } else {
            // If the split EVT was a <1 x T> vector, and NewVT is T, then we
            // don't have to do anything since we don't distinguish between
            // the two.
            if (NewLLT != MRI.getType(CurVReg)) {
              // A scalar extend.
              CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
                            .getReg(0);
            }
          }
        }
      }

      if (CurVReg != CurArgInfo.Regs[0]) {
        CurArgInfo.Regs[0] = CurVReg;
        // Reset the arg flags after modifying CurVReg.
        setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
      }
      splitToValueTypes(CurArgInfo, SplitArgs, DL, CC);
    }

    AArch64OutgoingValueAssigner Assigner(AssignFn, AssignFn, Subtarget,
                                          /*IsReturn*/ true);
    OutgoingArgHandler Handler(MIRBuilder, MRI, MIB);
    Success = determineAndHandleAssignments(Handler, Assigner, SplitArgs,
                                            MIRBuilder, CC, F.isVarArg());
  }

  if (SwiftErrorVReg) {
    MIB.addUse(AArch64::X21, RegState::Implicit);
    MIRBuilder.buildCopy(AArch64::X21, SwiftErrorVReg);
  }

  MIRBuilder.insertInstr(MIB);
  return Success;
}
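
// Note: X21 is the register the Swift ABI reserves for swifterror values on
// AArch64, which is why lowerReturn copies SwiftErrorVReg into X21 and marks
// it as an implicit use of the return instruction.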

bool AArch64CallLowering::canLowerReturn(MachineFunction &MF,
                                         CallingConv::ID CallConv,
                                         SmallVectorImpl<BaseArgInfo> &Outs,
                                         bool IsVarArg) const {
  SmallVector<CCValAssign, 16> ArgLocs;
  const auto &TLI = *getTLI<AArch64TargetLowering>();
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,
                 MF.getFunction().getContext());

  return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv));
}

/// Helper function to compute forwarded registers for musttail calls.
/// Computes the forwarded registers, sets MBB liveness, and emits COPY
/// instructions that can be used to save + restore registers later.
static void handleMustTailForwardedRegisters(MachineIRBuilder &MIRBuilder,
                                             CCAssignFn *AssignFn) {
  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  MachineFunction &MF = MIRBuilder.getMF();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!MFI.hasMustTailInVarArgFunc())
    return;

  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  const Function &F = MF.getFunction();
  assert(F.isVarArg() && "Expected F to be vararg?");

  // Compute the set of forwarded registers. The rest are scratch.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), true, MF, ArgLocs,
                 F.getContext());
  SmallVector<MVT, 2> RegParmTypes;
  RegParmTypes.push_back(MVT::i64);
  RegParmTypes.push_back(MVT::f128);

  // Later on, we can use this vector to restore the registers if necessary.
  SmallVectorImpl<ForwardedRegister> &Forwards =
      FuncInfo->getForwardedMustTailRegParms();
  CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, AssignFn);

  // Conservatively forward X8, since it might be used for an aggregate
  // return.
  if (!CCInfo.isAllocated(AArch64::X8)) {
    Register X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass);
    Forwards.push_back(ForwardedRegister(X8VReg, AArch64::X8, MVT::i64));
  }

  // Add the forwards to the MachineBasicBlock and MachineFunction.
  for (const auto &F : Forwards) {
    MBB.addLiveIn(F.PReg);
    MIRBuilder.buildCopy(Register(F.VReg), Register(F.PReg));
  }
}

bool AArch64CallLowering::fallBackToDAGISel(const MachineFunction &MF) const {
  auto &F = MF.getFunction();
  if (!EnableSVEGISel && (F.getReturnType()->isScalableTy() ||
                          any_of(F.args(), [](const Argument &A) {
                            return A.getType()->isScalableTy();
                          })))
    return true;

  const auto &ST = MF.getSubtarget<AArch64Subtarget>();
  if (!ST.hasNEON() || !ST.hasFPARMv8()) {
    LLVM_DEBUG(dbgs() << "Falling back to SDAG because we don't support no-NEON\n");
    return true;
  }

  SMEAttrs Attrs = MF.getInfo<AArch64FunctionInfo>()->getSMEFnAttrs();
  if (Attrs.hasZAState() || Attrs.hasZT0State() ||
      Attrs.hasStreamingInterfaceOrBody() ||
      Attrs.hasStreamingCompatibleInterface())
    return true;

  return false;
}

void AArch64CallLowering::saveVarArgRegisters(
    MachineIRBuilder &MIRBuilder, CallLowering::IncomingValueHandler &Handler,
    CCState &CCInfo) const {
  auto GPRArgRegs = AArch64::getGPRArgRegs();
  auto FPRArgRegs = AArch64::getFPRArgRegs();

  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  bool IsWin64CC = Subtarget.isCallingConvWin64(CCInfo.getCallingConv(),
                                                MF.getFunction().isVarArg());
  LLT p0 = LLT::pointer(0, 64);
  LLT s64 = LLT::scalar(64);

  unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(GPRArgRegs);
  unsigned NumVariadicGPRArgRegs = GPRArgRegs.size() - FirstVariadicGPR + 1;

  unsigned GPRSaveSize = 8 * (GPRArgRegs.size() - FirstVariadicGPR);
  int GPRIdx = 0;
  if (GPRSaveSize != 0) {
    if (IsWin64CC) {
      GPRIdx = MFI.CreateFixedObject(GPRSaveSize,
                                     -static_cast<int>(GPRSaveSize), false);
      if (GPRSaveSize & 15)
        // The extra size here, if triggered, will always be 8.
        MFI.CreateFixedObject(16 - (GPRSaveSize & 15),
                              -static_cast<int>(alignTo(GPRSaveSize, 16)),
                              false);
    } else
      GPRIdx = MFI.CreateStackObject(GPRSaveSize, Align(8), false);

    auto FIN = MIRBuilder.buildFrameIndex(p0, GPRIdx);
    auto Offset =
        MIRBuilder.buildConstant(MRI.createGenericVirtualRegister(s64), 8);

    for (unsigned i = FirstVariadicGPR; i < GPRArgRegs.size(); ++i) {
      Register Val = MRI.createGenericVirtualRegister(s64);
      Handler.assignValueToReg(
          Val, GPRArgRegs[i],
          CCValAssign::getReg(i + MF.getFunction().getNumOperands(), MVT::i64,
                              GPRArgRegs[i], MVT::i64, CCValAssign::Full));
      auto MPO = IsWin64CC ? MachinePointerInfo::getFixedStack(
                                 MF, GPRIdx, (i - FirstVariadicGPR) * 8)
                           : MachinePointerInfo::getStack(MF, i * 8);
      MIRBuilder.buildStore(Val, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));

      FIN = MIRBuilder.buildPtrAdd(MRI.createGenericVirtualRegister(p0),
                                   FIN.getReg(0), Offset);
    }
  }
  FuncInfo->setVarArgsGPRIndex(GPRIdx);
  FuncInfo->setVarArgsGPRSize(GPRSaveSize);

  if (Subtarget.hasFPARMv8() && !IsWin64CC) {
    unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(FPRArgRegs);

    unsigned FPRSaveSize = 16 * (FPRArgRegs.size() - FirstVariadicFPR);
    int FPRIdx = 0;
    if (FPRSaveSize != 0) {
      FPRIdx = MFI.CreateStackObject(FPRSaveSize, Align(16), false);

      auto FIN = MIRBuilder.buildFrameIndex(p0, FPRIdx);
      auto Offset =
          MIRBuilder.buildConstant(MRI.createGenericVirtualRegister(s64), 16);

      for (unsigned i = FirstVariadicFPR; i < FPRArgRegs.size(); ++i) {
        Register Val = MRI.createGenericVirtualRegister(LLT::scalar(128));
        Handler.assignValueToReg(
            Val, FPRArgRegs[i],
            CCValAssign::getReg(i + MF.getFunction().getNumOperands(),
                                MVT::f128, FPRArgRegs[i], MVT::f128,
                                CCValAssign::Full));

        auto MPO = MachinePointerInfo::getStack(MF, i * 16);
        MIRBuilder.buildStore(Val, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));

        FIN = MIRBuilder.buildPtrAdd(MRI.createGenericVirtualRegister(p0),
                                     FIN.getReg(0), Offset);
      }
    }
    FuncInfo->setVarArgsFPRIndex(FPRIdx);
    FuncInfo->setVarArgsFPRSize(FPRSaveSize);
  }
}
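
// Layout summary: on Win64 the GPR save area is a fixed object immediately
// below the incoming stack pointer, so the named and variadic arguments form
// one contiguous region; on AAPCS targets the remaining GPRs (from x0-x7)
// and, when floating point is available, the SIMD/FP registers (q0-q7) are
// saved to ordinary stack objects whose indices are recorded in
// AArch64FunctionInfo for va_start to locate.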

bool AArch64CallLowering::lowerFormalArguments(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs, FunctionLoweringInfo &FLI) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  auto &DL = F.getDataLayout();
  auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();

  // Arm64EC has extra requirements for varargs calls which are only
  // implemented in SelectionDAG; bail out for now.
  if (F.isVarArg() && Subtarget.isWindowsArm64EC())
    return false;

  // Arm64EC thunks have a special calling convention which is only
  // implemented in SelectionDAG; bail out if we have one.
  if (F.getCallingConv() == CallingConv::ARM64EC_Thunk_X64 ||
      F.getCallingConv() == CallingConv::ARM64EC_Thunk_Native)
    return false;

  bool IsWin64 =
      Subtarget.isCallingConvWin64(F.getCallingConv(), F.isVarArg()) &&
      !Subtarget.isWindowsArm64EC();

  SmallVector<ArgInfo, 8> SplitArgs;
  SmallVector<std::pair<Register, Register>> BoolArgs;

  // Insert the hidden sret parameter if the return value won't fit in the
  // return registers.
  if (!FLI.CanLowerReturn)
    insertSRetIncomingArgument(F, SplitArgs, FLI.DemoteRegister, MRI, DL);

  unsigned i = 0;
  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()).isZero())
      continue;

    ArgInfo OrigArg{VRegs[i], Arg, i};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F);

    // i1 arguments are zero-extended to i8 by the caller. Emit a hint to
    // reflect this.
    if (OrigArg.Ty->isIntegerTy(1)) {
      assert(OrigArg.Regs.size() == 1 &&
             MRI.getType(OrigArg.Regs[0]).getSizeInBits() == 1 &&
             "Unexpected registers used for i1 arg");

      auto &Flags = OrigArg.Flags[0];
      if (!Flags.isZExt() && !Flags.isSExt()) {
        // Lower i1 argument as i8, and insert AssertZExt + Trunc later.
        Register OrigReg = OrigArg.Regs[0];
        Register WideReg = MRI.createGenericVirtualRegister(LLT::scalar(8));
        OrigArg.Regs[0] = WideReg;
        BoolArgs.push_back({OrigReg, WideReg});
      }
    }

    if (Arg.hasAttribute(Attribute::SwiftAsync))
      MF.getInfo<AArch64FunctionInfo>()->setHasSwiftAsyncContext(true);

    splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv());
    ++i;
  }

750

  if (!MBB.empty())
    MIRBuilder.setInstr(*MBB.begin());

  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  CCAssignFn *AssignFn = TLI.CCAssignFnForCall(F.getCallingConv(), IsWin64 && F.isVarArg());

  AArch64IncomingValueAssigner Assigner(AssignFn, AssignFn);
  FormalArgHandler Handler(MIRBuilder, MRI);
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
  if (!determineAssignments(Assigner, SplitArgs, CCInfo) ||
      !handleAssignments(Handler, SplitArgs, CCInfo, ArgLocs, MIRBuilder))
    return false;

  if (!BoolArgs.empty()) {
    for (auto &KV : BoolArgs) {
      Register OrigReg = KV.first;
      Register WideReg = KV.second;
      LLT WideTy = MRI.getType(WideReg);
      assert(MRI.getType(OrigReg).getScalarSizeInBits() == 1 &&
             "Unexpected bit size of a bool arg");
      MIRBuilder.buildTrunc(
          OrigReg, MIRBuilder.buildAssertZExt(WideTy, WideReg, 1).getReg(0));
    }
  }

  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  uint64_t StackSize = Assigner.StackSize;
  if (F.isVarArg()) {
    if ((!Subtarget.isTargetDarwin() && !Subtarget.isWindowsArm64EC()) || IsWin64) {
      // The AAPCS variadic function ABI is identical to the non-variadic one.
      // As a result there may be more arguments in registers and we should
      // save them for future reference.
      // Win64 variadic functions also pass arguments in registers, but all
      // float arguments are passed in integer registers.
      saveVarArgRegisters(MIRBuilder, Handler, CCInfo);
    } else if (Subtarget.isWindowsArm64EC()) {
      return false;
    }

    // We currently pass all varargs at 8-byte alignment, or 4 in ILP32.
    StackSize = alignTo(Assigner.StackSize, Subtarget.isTargetILP32() ? 4 : 8);

    auto &MFI = MIRBuilder.getMF().getFrameInfo();
    FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackSize, true));
  }

  if (doesCalleeRestoreStack(F.getCallingConv(),
                             MF.getTarget().Options.GuaranteedTailCallOpt)) {
    // We have a non-standard ABI, so why not make full use of the stack that
    // we're going to pop? It must be aligned to 16 B in any case.
    StackSize = alignTo(StackSize, 16);

    // If we're expected to restore the stack (e.g. fastcc), then we'll be
    // adding a multiple of 16.
    FuncInfo->setArgumentStackToRestore(StackSize);

    // Our own callers will guarantee that the space is free by giving an
    // aligned value to CALLSEQ_START.
  }

  // When we tail call, we need to check if the callee's arguments will fit on
  // the caller's stack. So, whenever we lower formal arguments, we should
  // keep track of this information, since we might lower a tail call in this
  // function later.
  FuncInfo->setBytesInStackArgArea(StackSize);

  if (Subtarget.hasCustomCallingConv())
    Subtarget.getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);

  handleMustTailForwardedRegisters(MIRBuilder, AssignFn);

  // Move back to the end of the basic block.
  MIRBuilder.setMBB(MBB);

  return true;
}

/// Return true if the calling convention is one that we can guarantee TCO for.
static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) {
  return (CC == CallingConv::Fast && GuaranteeTailCalls) ||
         CC == CallingConv::Tail || CC == CallingConv::SwiftTail;
}

/// Return true if we might ever do TCO for calls with this calling convention.
static bool mayTailCallThisCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::C:
  case CallingConv::PreserveMost:
  case CallingConv::PreserveAll:
  case CallingConv::PreserveNone:
  case CallingConv::Swift:
  case CallingConv::SwiftTail:
  case CallingConv::Tail:
  case CallingConv::Fast:
    return true;
  default:
    return false;
  }
}
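
// Of these, CallingConv::Tail, CallingConv::SwiftTail, and fastcc under
// -tailcallopt additionally *guarantee* tail-call optimization (see
// canGuaranteeTCO above); the remaining conventions are tail-called only when
// the checks in isEligibleForTailCallOptimization succeed.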

/// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn
/// for CC.
static std::pair<CCAssignFn *, CCAssignFn *>
getAssignFnsForCC(CallingConv::ID CC, const AArch64TargetLowering &TLI) {
  return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)};
}

bool AArch64CallLowering::doCallerAndCalleePassArgsTheSameWay(
    CallLoweringInfo &Info, MachineFunction &MF,
    SmallVectorImpl<ArgInfo> &InArgs) const {
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();

  // If the calling conventions match, then everything must be the same.
  if (CalleeCC == CallerCC)
    return true;

  // Check if the caller and callee will handle arguments in the same way.
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  CCAssignFn *CalleeAssignFnFixed;
  CCAssignFn *CalleeAssignFnVarArg;
  std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =
      getAssignFnsForCC(CalleeCC, TLI);

  CCAssignFn *CallerAssignFnFixed;
  CCAssignFn *CallerAssignFnVarArg;
  std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
      getAssignFnsForCC(CallerCC, TLI);

  AArch64IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed,
                                              CalleeAssignFnVarArg);
  AArch64IncomingValueAssigner CallerAssigner(CallerAssignFnFixed,
                                              CallerAssignFnVarArg);

  if (!resultsCompatible(Info, MF, InArgs, CalleeAssigner, CallerAssigner))
    return false;

  // Make sure that the caller and callee preserve all of the same registers.
  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
  if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv()) {
    TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved);
    TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved);
  }

  return TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved);
}

bool AArch64CallLowering::areCalleeOutgoingArgsTailCallable(
    CallLoweringInfo &Info, MachineFunction &MF,
    SmallVectorImpl<ArgInfo> &OrigOutArgs) const {
  // If there are no outgoing arguments, then we are done.
  if (OrigOutArgs.empty())
    return true;

  const Function &CallerF = MF.getFunction();
  LLVMContext &Ctx = CallerF.getContext();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();

  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);

  // We have outgoing arguments. Make sure that we can tail call with them.
  SmallVector<CCValAssign, 16> OutLocs;
  CCState OutInfo(CalleeCC, false, MF, OutLocs, Ctx);

  AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,
                                              Subtarget, /*IsReturn*/ false);
  // determineAssignments() may mutate the argument list, so analyze a copy.
  SmallVector<ArgInfo, 32> OutArgs;
  append_range(OutArgs, OrigOutArgs);
  if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo)) {
    LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n");
    return false;
  }

  // Make sure that they can fit on the caller's stack.
  const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  if (OutInfo.getStackSize() > FuncInfo->getBytesInStackArgArea()) {
    LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");
    return false;
  }

  // Verify that the parameters in callee-saved registers match.
  // TODO: Port this over to CallLowering as general code once swiftself is
  // supported.
  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
  const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (Info.IsVarArg) {
    // Be conservative and disallow variadic memory operands to match SDAG's
    // behaviour.
    // FIXME: If the caller's calling convention is C, then we can potentially
    // use its argument area. However, for cases like fastcc, we can't do
    // anything.
    for (unsigned i = 0; i < OutLocs.size(); ++i) {
      auto &ArgLoc = OutLocs[i];
      if (ArgLoc.isRegLoc())
        continue;

      LLVM_DEBUG(
          dbgs()
          << "... Cannot tail call vararg function with stack arguments\n");
      return false;
    }
  }

  return parametersInCSRMatch(MRI, CallerPreservedMask, OutLocs, OutArgs);
}

bool AArch64CallLowering::isEligibleForTailCallOptimization(
    MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
    SmallVectorImpl<ArgInfo> &InArgs,
    SmallVectorImpl<ArgInfo> &OutArgs) const {
  // Must pass all target-independent checks in order to tail call optimize.
  if (!Info.IsTailCall)
    return false;

  CallingConv::ID CalleeCC = Info.CallConv;
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &CallerF = MF.getFunction();

  LLVM_DEBUG(dbgs() << "Attempting to lower call as tail call\n");

  if (Info.SwiftErrorVReg) {
    // TODO: We should handle this.
    // Note that this is also handled by the check for no outgoing arguments.
    // Proactively disabling this though, because the swifterror handling in
    // lowerCall inserts a COPY *after* the location of the call.
    LLVM_DEBUG(dbgs() << "... Cannot handle tail calls with swifterror yet.\n");
    return false;
  }

  if (!mayTailCallThisCC(CalleeCC)) {
    LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n");
    return false;
  }

  // Byval parameters hand the function a pointer directly into the stack area
  // we want to reuse during a tail call. Working around this *is* possible
  // (see X86).
  //
  // FIXME: In AArch64ISelLowering, this isn't worked around. Can/should we
  // try it?
  //
  // On Windows, "inreg" attributes signify non-aggregate indirect returns.
  // In this case, it is necessary to save/restore X0 in the callee. Tail
  // call opt interferes with this. So we disable tail call opt when the
  // caller has an "inreg" attribute.
  //
  // FIXME: Check whether the callee also has an "inreg" argument.
  //
  // When the caller has a swifterror argument, we don't want to tail call
  // because we would have to move into the swifterror register before the
  // tail call.
  if (any_of(CallerF.args(), [](const Argument &A) {
        return A.hasByValAttr() || A.hasInRegAttr() || A.hasSwiftErrorAttr();
      })) {
    LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval, "
                         "inreg, or swifterror arguments\n");
    return false;
  }

  // Externally-defined functions with weak linkage should not be
  // tail-called on AArch64 when the OS does not support dynamic
  // pre-emption of symbols, as the AAELF spec requires normal calls
  // to undefined weak functions to be replaced with a NOP or jump to the
  // next instruction. The behaviour of branch instructions in this
  // situation (as used for tail calls) is implementation-defined, so we
  // cannot rely on the linker replacing the tail call with a return.
  if (Info.Callee.isGlobal()) {
    const GlobalValue *GV = Info.Callee.getGlobal();
    const Triple &TT = MF.getTarget().getTargetTriple();
    if (GV->hasExternalWeakLinkage() &&
        (!TT.isOSWindows() || TT.isOSBinFormatELF() ||
         TT.isOSBinFormatMachO())) {
      LLVM_DEBUG(dbgs() << "... Cannot tail call externally-defined function "
                           "with weak linkage for this OS.\n");
      return false;
    }
  }

  // If we have -tailcallopt, then we're done.
  if (canGuaranteeTCO(CalleeCC, MF.getTarget().Options.GuaranteedTailCallOpt))
    return CalleeCC == CallerF.getCallingConv();

  // We need to check if the callee and caller pass arguments the same way.
  assert((!Info.IsVarArg || CalleeCC == CallingConv::C) &&
         "Unexpected variadic calling convention");

  // Verify that the incoming and outgoing arguments from the callee are
  // safe to tail call.
  if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) {
    LLVM_DEBUG(
        dbgs()
        << "... Caller and callee have incompatible calling conventions.\n");
    return false;
  }

  if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs))
    return false;

  LLVM_DEBUG(
      dbgs() << "... Call is eligible for tail call optimization.\n");
  return true;
}
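
// As a hypothetical illustration: an IR call such as
//   %r = tail call i64 @callee(i64 %x)
// from a C-calling-convention caller with no byval, inreg, or swifterror
// arguments passes the checks above, whereas the same call marked musttail
// that fails them causes lowerCall to reject the lowering entirely.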

static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
                              bool IsTailCall,
                              std::optional<CallLowering::PtrAuthInfo> &PAI,
                              MachineRegisterInfo &MRI) {
  const AArch64FunctionInfo *FuncInfo = CallerF.getInfo<AArch64FunctionInfo>();

  if (!IsTailCall) {
    if (!PAI)
      return IsIndirect ? getBLRCallOpcode(CallerF) : (unsigned)AArch64::BL;

    assert(IsIndirect && "Direct call should not be authenticated");
    assert((PAI->Key == AArch64PACKey::IA || PAI->Key == AArch64PACKey::IB) &&
           "Invalid auth call key");
    return AArch64::BLRA;
  }

  if (!IsIndirect)
    return AArch64::TCRETURNdi;

  // When BTI is enabled, we need to use TCRETURNrix16x17 or TCRETURNrix17 so
  // that the callee address is in a branch-target-safe register.
  if (FuncInfo->branchTargetEnforcement()) {
    if (FuncInfo->branchProtectionPAuthLR()) {
      assert(!PAI && "ptrauth tail-calls not yet supported with PAuthLR");
      return AArch64::TCRETURNrix17;
    }
    if (PAI)
      return AArch64::AUTH_TCRETURN_BTI;
    return AArch64::TCRETURNrix16x17;
  }

  if (FuncInfo->branchProtectionPAuthLR()) {
    assert(!PAI && "ptrauth tail-calls not yet supported with PAuthLR");
    return AArch64::TCRETURNrinotx16;
  }

  if (PAI)
    return AArch64::AUTH_TCRETURN;
  return AArch64::TCRETURNri;
}
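
// Opcode choice in brief: BL/BLR (or BLRA when authenticated) for normal
// calls; TCRETURNdi for direct tail calls; and for indirect tail calls,
// variants that force the target into x16/x17 when BTI enforcement requires a
// branch-target-safe register, plus the AUTH_TCRETURN forms when pointer
// authentication is in use.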

static const uint32_t *
getMaskForArgs(SmallVectorImpl<AArch64CallLowering::ArgInfo> &OutArgs,
               AArch64CallLowering::CallLoweringInfo &Info,
               const AArch64RegisterInfo &TRI, MachineFunction &MF) {
  const uint32_t *Mask;
  if (!OutArgs.empty() && OutArgs[0].Flags[0].isReturned()) {
    // For 'this' returns, use the X0-preserving mask if applicable.
    Mask = TRI.getThisReturnPreservedMask(MF, Info.CallConv);
    if (!Mask) {
      OutArgs[0].Flags[0].setReturned(false);
      Mask = TRI.getCallPreservedMask(MF, Info.CallConv);
    }
  } else {
    Mask = TRI.getCallPreservedMask(MF, Info.CallConv);
  }
  return Mask;
}

bool AArch64CallLowering::lowerTailCall(
    MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
    SmallVectorImpl<ArgInfo> &OutArgs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();

  // True when we're tail calling, but without -tailcallopt.
  bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt &&
                   Info.CallConv != CallingConv::Tail &&
                   Info.CallConv != CallingConv::SwiftTail;

  // Find out which ABI gets to decide where things go.
  CallingConv::ID CalleeCC = Info.CallConv;
  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);

  MachineInstrBuilder CallSeqStart;
  if (!IsSibCall)
    CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);

  unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), true, Info.PAI, MRI);
  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  MIB.add(Info.Callee);

  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  auto TRI = Subtarget.getRegisterInfo();

  // Byte offset for the tail call. When we are sibcalling, this will always
  // be 0.
  MIB.addImm(0);

  // Authenticated tail calls always take key/discriminator arguments.
  if (Opc == AArch64::AUTH_TCRETURN || Opc == AArch64::AUTH_TCRETURN_BTI) {
    assert((Info.PAI->Key == AArch64PACKey::IA ||
            Info.PAI->Key == AArch64PACKey::IB) &&
           "Invalid auth call key");
    MIB.addImm(Info.PAI->Key);

    Register AddrDisc = AArch64::NoRegister;
    uint16_t IntDisc = 0;
    std::tie(IntDisc, AddrDisc) =
        extractPtrauthBlendDiscriminators(Info.PAI->Discriminator, MRI);

    MIB.addImm(IntDisc);
    MIB.addUse(AddrDisc);
    if (AddrDisc != AArch64::NoRegister) {
      MIB->getOperand(4).setReg(constrainOperandRegClass(
          MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
          *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(),
          MIB->getOperand(4), 4));
    }
  }

  // Tell the call which registers are clobbered.
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
  if (Subtarget.hasCustomCallingConv())
    TRI->UpdateCustomCallPreservedMask(MF, &Mask);
  MIB.addRegMask(Mask);

  if (Info.CFIType)
    MIB->setCFIType(MF, Info.CFIType->getZExtValue());

  if (TRI->isAnyArgRegReserved(MF))
    TRI->emitReservedArgRegCallError(MF);

  // FPDiff is the byte offset of the call's argument area from the callee's.
  // Stores to callee stack arguments will be placed in FixedStackSlots offset
  // by this amount for a tail call. In a sibling call it must be 0 because
  // the caller will deallocate the entire stack and the callee still expects
  // its arguments to begin at SP+0.
  int FPDiff = 0;

  // This will be 0 for sibcalls, potentially nonzero for tail calls produced
  // by -tailcallopt. For sibcalls, the memory operands for the call are
  // already available in the caller's incoming argument space.
  unsigned NumBytes = 0;
  if (!IsSibCall) {
    // We aren't sibcalling, so we need to compute FPDiff. We need to do this
    // before handling assignments, because FPDiff must be known for memory
    // arguments.
    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
    SmallVector<CCValAssign, 16> OutLocs;
    CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext());

    AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,
                                                Subtarget, /*IsReturn*/ false);
    if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo))
      return false;

    // The callee will pop the argument stack as a tail call. Thus, we must
    // keep it 16-byte aligned.
    NumBytes = alignTo(OutInfo.getStackSize(), 16);

    // FPDiff will be negative if this tail call requires more space than we
    // would automatically have in our incoming argument space. Positive if we
    // actually shrink the stack.
    FPDiff = NumReusableBytes - NumBytes;

    // Update the required reserved area if this is the tail call requiring
    // the most space so far.
    if (FPDiff < 0 && FuncInfo->getTailCallReservedStack() < (unsigned)-FPDiff)
      FuncInfo->setTailCallReservedStack((unsigned)-FPDiff);

    // The stack pointer must be 16-byte aligned at all times it's used for a
    // memory operation, which in practice means at *all* times and in
    // particular across call boundaries. Therefore our own arguments started
    // at a 16-byte aligned SP and the delta applied for the tail call should
    // satisfy the same constraint.
    assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
  }

  const auto &Forwards = FuncInfo->getForwardedMustTailRegParms();

  AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,
                                        Subtarget, /*IsReturn*/ false);

  // Do the actual argument marshalling.
  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB,
                             /*IsTailCall*/ true, FPDiff);
  if (!determineAndHandleAssignments(Handler, Assigner, OutArgs, MIRBuilder,
                                     CalleeCC, Info.IsVarArg))
    return false;

  Mask = getMaskForArgs(OutArgs, Info, *TRI, MF);

  if (Info.IsVarArg && Info.IsMustTailCall) {
    // Now we know what's being passed to the function. Add uses to the call
    // for the forwarded registers that we *aren't* passing as parameters.
    // This will preserve the copies we build earlier.
    for (const auto &F : Forwards) {
      Register ForwardedReg = F.PReg;
      // If the register is already passed, or aliases a register which is
      // already being passed, then skip it.
      if (any_of(MIB->uses(), [&ForwardedReg, &TRI](const MachineOperand &Use) {
            if (!Use.isReg())
              return false;
            return TRI->regsOverlap(Use.getReg(), ForwardedReg);
          }))
        continue;

      // We aren't passing it already, so we should add it to the call.
      MIRBuilder.buildCopy(ForwardedReg, Register(F.VReg));
      MIB.addReg(ForwardedReg, RegState::Implicit);
    }
  }

  // If we have -tailcallopt, we need to adjust the stack. We'll do the call
  // sequence start and end here.
  if (!IsSibCall) {
    MIB->getOperand(1).setImm(FPDiff);
    CallSeqStart.addImm(0).addImm(0);
    // End the call sequence *before* emitting the call. Normally, we would
    // tidy the frame up after the call. However, here, we've laid out the
    // parameters so that when SP is reset, they will be in the correct
    // location.
    MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP).addImm(0).addImm(0);
  }

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // If Callee is a reg, since it is used by a target specific instruction,
  // it must have a register class matching the constraint of that
  // instruction.
  if (MIB->getOperand(0).isReg())
    constrainOperandRegClass(MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
                             *MF.getSubtarget().getRegBankInfo(), *MIB,
                             MIB->getDesc(), MIB->getOperand(0), 0);

  MF.getFrameInfo().setHasTailCall();
  Info.LoweredTailCall = true;
  return true;
}

bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                    CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  auto &DL = F.getDataLayout();
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();

  // Arm64EC has extra requirements for varargs calls; bail out for now.
  //
  // Arm64EC has special mangling rules for calls; bail out on all calls for
  // now.
  if (Subtarget.isWindowsArm64EC())
    return false;

  // Arm64EC thunks have a special calling convention which is only
  // implemented in SelectionDAG; bail out if we have one.
  if (Info.CallConv == CallingConv::ARM64EC_Thunk_X64 ||
      Info.CallConv == CallingConv::ARM64EC_Thunk_Native)
    return false;

  SmallVector<ArgInfo, 8> OutArgs;
  for (auto &OrigArg : Info.OrigArgs) {
    splitToValueTypes(OrigArg, OutArgs, DL, Info.CallConv);

    // AAPCS requires that we zero-extend i1 to 8 bits by the caller.
    auto &Flags = OrigArg.Flags[0];
    if (OrigArg.Ty->isIntegerTy(1) && !Flags.isSExt() && !Flags.isZExt()) {
      ArgInfo &OutArg = OutArgs.back();
      assert(OutArg.Regs.size() == 1 &&
             MRI.getType(OutArg.Regs[0]).getSizeInBits() == 1 &&
             "Unexpected registers used for i1 arg");

      // We cannot use a ZExt ArgInfo flag here, because it will
      // zero-extend the argument to i32 instead of just i8.
      OutArg.Regs[0] =
          MIRBuilder.buildZExt(LLT::scalar(8), OutArg.Regs[0]).getReg(0);
      LLVMContext &Ctx = MF.getFunction().getContext();
      OutArg.Ty = Type::getInt8Ty(Ctx);
    }
  }

  SmallVector<ArgInfo, 8> InArgs;
  if (!Info.OrigRet.Ty->isVoidTy())
    splitToValueTypes(Info.OrigRet, InArgs, DL, Info.CallConv);

  // If we can lower as a tail call, do that instead.
  bool CanTailCallOpt =
      isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs);

  // We must emit a tail call if we have musttail.
  if (Info.IsMustTailCall && !CanTailCallOpt) {
    // There are types of incoming/outgoing arguments we can't handle yet, so
    // it doesn't make sense to promise we can tail call here. Thus, fail
    // lowering the call entirely.
    LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n");
    return false;
  }

  Info.IsTailCall = CanTailCallOpt;
  if (CanTailCallOpt)
    return lowerTailCall(MIRBuilder, Info, OutArgs);

  // Find out which ABI gets to decide where things go.
  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) =
      getAssignFnsForCC(Info.CallConv, TLI);

  MachineInstrBuilder CallSeqStart;
  CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);

  // Create a temporarily-floating call instruction so we can add the implicit
  // uses of arg registers.

  unsigned Opc = 0;
  // Calls with operand bundle "clang.arc.attachedcall" are special. They
  // should be expanded to the call, directly followed by a special marker
  // sequence and a call to an ObjC library function.
  if (Info.CB && objcarc::hasAttachedCallOpBundle(Info.CB))
    Opc = Info.PAI ? AArch64::BLRA_RVMARKER : AArch64::BLR_RVMARKER;
  // A call to a returns twice function like setjmp must be followed by a bti
  // instruction.
  else if (Info.CB && Info.CB->hasFnAttr(Attribute::ReturnsTwice) &&
           !Subtarget.noBTIAtReturnTwice() &&
           MF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement())
    Opc = AArch64::BLR_BTI;
  else {
    // For an intrinsic call (e.g. memset, which comes in as an external
    // symbol), use GOT if "RtLibUseGOT" (-fno-plt) is set.
    if (Info.Callee.isSymbol() && F.getParent()->getRtLibUseGOT()) {
      auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_GLOBAL_VALUE);
      DstOp(getLLTForType(*F.getType(), DL)).addDefToMIB(MRI, MIB);
      MIB.addExternalSymbol(Info.Callee.getSymbolName(), AArch64II::MO_GOT);
      Info.Callee = MachineOperand::CreateReg(MIB.getReg(0), false);
    }
    Opc = getCallOpcode(MF, Info.Callee.isReg(), false, Info.PAI, MRI);
  }

  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  unsigned CalleeOpNo = 0;

  if (Opc == AArch64::BLR_RVMARKER || Opc == AArch64::BLRA_RVMARKER) {
    // Add a target global address for the retainRV/claimRV runtime function
    // just before the call target.
    Function *ARCFn = *objcarc::getAttachedARCFunction(Info.CB);
    MIB.addGlobalAddress(ARCFn);
    ++CalleeOpNo;

    // Tell the pseudo expansion whether the attached call needs the marker.
    MIB.addImm(objcarc::attachedCallOpBundleNeedsMarker(Info.CB));
    ++CalleeOpNo;
  } else if (Info.CFIType) {
    MIB->setCFIType(MF, Info.CFIType->getZExtValue());
  }

  MIB.add(Info.Callee);

  // Tell the call which registers are clobbered.
  const AArch64RegisterInfo *TRI = Subtarget.getRegisterInfo();

  AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,
                                        Subtarget, /*IsReturn*/ false);
  // Do the actual argument marshalling.
  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, /*IsTailCall*/ false);
  if (!determineAndHandleAssignments(Handler, Assigner, OutArgs, MIRBuilder,
                                     Info.CallConv, Info.IsVarArg))
    return false;

  const uint32_t *Mask = getMaskForArgs(OutArgs, Info, *TRI, MF);

  if (Opc == AArch64::BLRA || Opc == AArch64::BLRA_RVMARKER) {
    assert((Info.PAI->Key == AArch64PACKey::IA ||
            Info.PAI->Key == AArch64PACKey::IB) &&
           "Invalid auth call key");
    MIB.addImm(Info.PAI->Key);

    Register AddrDisc = AArch64::NoRegister;
    uint16_t IntDisc = 0;
    std::tie(IntDisc, AddrDisc) =
        extractPtrauthBlendDiscriminators(Info.PAI->Discriminator, MRI);

    MIB.addImm(IntDisc);
    MIB.addUse(AddrDisc);
    if (AddrDisc != AArch64::NoRegister) {
      constrainOperandRegClass(MF, *TRI, MRI, *Subtarget.getInstrInfo(),
                               *Subtarget.getRegBankInfo(), *MIB,
                               MIB->getDesc(), MIB->getOperand(CalleeOpNo + 3),
                               CalleeOpNo + 3);
    }
  }

  if (Subtarget.hasCustomCallingConv())
    TRI->UpdateCustomCallPreservedMask(MF, &Mask);
  MIB.addRegMask(Mask);

  if (TRI->isAnyArgRegReserved(MF))
    TRI->emitReservedArgRegCallError(MF);

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  uint64_t CalleePopBytes =
      doesCalleeRestoreStack(Info.CallConv,
                             MF.getTarget().Options.GuaranteedTailCallOpt)
          ? alignTo(Assigner.StackSize, 16)
          : 0;

  CallSeqStart.addImm(Assigner.StackSize).addImm(0);
  MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP)
      .addImm(Assigner.StackSize)
      .addImm(CalleePopBytes);

  // If Callee is a reg, since it is used by a target specific instruction,
  // it must have a register class matching the constraint of that
  // instruction.
  if (MIB->getOperand(CalleeOpNo).isReg())
    constrainOperandRegClass(MF, *TRI, MRI, *Subtarget.getInstrInfo(),
                             *Subtarget.getRegBankInfo(), *MIB, MIB->getDesc(),
                             MIB->getOperand(CalleeOpNo), CalleeOpNo);

  // Finally we can copy the returned value back into its virtual-register.
  // In symmetry with the arguments, the physical register must be an
  // implicit-define of the call instruction.
  if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) {
    CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv);
    CallReturnHandler Handler(MIRBuilder, MRI, MIB);
    bool UsingReturnedArg =
        !OutArgs.empty() && OutArgs[0].Flags[0].isReturned();

    AArch64OutgoingValueAssigner Assigner(RetAssignFn, RetAssignFn, Subtarget,
                                          /*IsReturn*/ false);
    ReturnedArgCallReturnHandler ReturnedArgHandler(MIRBuilder, MRI, MIB);
    if (!determineAndHandleAssignments(
            UsingReturnedArg ? ReturnedArgHandler : Handler, Assigner, InArgs,
            MIRBuilder, Info.CallConv, Info.IsVarArg,
            UsingReturnedArg ? ArrayRef(OutArgs[0].Regs)
                             : ArrayRef<Register>()))
      return false;
  }

  if (Info.SwiftErrorVReg) {
    MIB.addDef(AArch64::X21, RegState::Implicit);
    MIRBuilder.buildCopy(Info.SwiftErrorVReg, Register(AArch64::X21));
  }

  if (!Info.CanLowerReturn) {
    insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs,
                    Info.DemoteRegister, Info.DemoteStackIndex);
  }
  return true;
}

bool AArch64CallLowering::isTypeIsValidForThisReturn(EVT Ty) const {
  return Ty.getSizeInBits() == 64;
}
