LLVM: lib/CodeGen/GlobalISel/CallLowering.cpp Source File

//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "call-lowering"

using namespace llvm;

void CallLowering::anchor() {}

/// Helper function which updates \p Flags when \p AttrFn returns true.
static void
addFlagsUsingAttrFn(ISD::ArgFlagsTy &Flags,
                    const std::function<bool(Attribute::AttrKind)> &AttrFn) {
  if (AttrFn(Attribute::SExt))
    Flags.setSExt();
  if (AttrFn(Attribute::ZExt))
    Flags.setZExt();
  if (AttrFn(Attribute::InReg))
    Flags.setInReg();
  if (AttrFn(Attribute::StructRet))
    Flags.setSRet();
  if (AttrFn(Attribute::Nest))
    Flags.setNest();
  if (AttrFn(Attribute::ByVal))
    Flags.setByVal();
  if (AttrFn(Attribute::ByRef))
    Flags.setByRef();
  if (AttrFn(Attribute::Preallocated))
    Flags.setPreallocated();
  if (AttrFn(Attribute::InAlloca))
    Flags.setInAlloca();
  if (AttrFn(Attribute::Returned))
    Flags.setReturned();
  if (AttrFn(Attribute::SwiftSelf))
    Flags.setSwiftSelf();
  if (AttrFn(Attribute::SwiftAsync))
    Flags.setSwiftAsync();
  if (AttrFn(Attribute::SwiftError))
    Flags.setSwiftError();
}

ISD::ArgFlagsTy CallLowering::getAttributesForArgIdx(const CallBase &Call,
                                                     unsigned ArgIdx) const {
  ISD::ArgFlagsTy Flags;
  addFlagsUsingAttrFn(Flags, [&Call, &ArgIdx](Attribute::AttrKind Attr) {
    return Call.paramHasAttr(ArgIdx, Attr);
  });
  return Flags;
}

ISD::ArgFlagsTy
CallLowering::getAttributesForReturn(const CallBase &Call) const {
  ISD::ArgFlagsTy Flags;
  addFlagsUsingAttrFn(Flags, [&Call](Attribute::AttrKind Attr) {
    return Call.hasRetAttr(Attr);
  });
  return Flags;
}

void CallLowering::addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags,
                                             const AttributeList &Attrs,
                                             unsigned OpIdx) const {
  addFlagsUsingAttrFn(Flags, [&Attrs, &OpIdx](Attribute::AttrKind Attr) {
    return Attrs.hasAttributeAtIndex(OpIdx, Attr);
  });
}

bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
                             ArrayRef<Register> ResRegs,
                             ArrayRef<ArrayRef<Register>> ArgRegs,
                             Register SwiftErrorVReg,
                             std::optional<PtrAuthInfo> PAI,
                             Register ConvergenceCtrlToken,
                             std::function<Register()> GetCalleeReg) const {
  CallLoweringInfo Info;
  const DataLayout &DL = MIRBuilder.getDataLayout();
  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool CanBeTailCalled = CB.isTailCall() &&
                         isInTailCallPosition(CB, MF.getTarget()) &&
                         (MF.getFunction()
                              .getFnAttribute("disable-tail-calls")
                              .getValueAsString() != "true");

  CallingConv::ID CallConv = CB.getCallingConv();
  Type *RetTy = CB.getType();
  bool IsVarArg = CB.getFunctionType()->isVarArg();

  SmallVector<BaseArgInfo, 4> SplitArgs;
  getReturnInfo(CallConv, RetTy, CB.getAttributes(), SplitArgs, DL);
  Info.CanLowerReturn = canLowerReturn(MF, CallConv, SplitArgs, IsVarArg);

  Info.IsConvergent = CB.isConvergent();

  if (!Info.CanLowerReturn) {
    // Callee requires sret demotion.
    insertSRetOutgoingArgument(MIRBuilder, CB, Info);

    // The sret demotion isn't compatible with tail-calls, since the sret
    // argument points into the caller's stack frame.
    CanBeTailCalled = false;
  }

  // First step is to marshall all the function's parameters into the correct
  // physregs and memory locations. Gather the sequence of argument types that
  // we'll pass to the assigner function.
  unsigned i = 0;
  unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
  for (const auto &Arg : CB.args()) {
    ArgInfo OrigArg{ArgRegs[i], *Arg.get(), i, getAttributesForArgIdx(CB, i)};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);
    if (i >= NumFixedArgs)
      OrigArg.Flags[0].setVarArg();

    // If we have an explicit sret argument that is an Instruction, (i.e. it
    // might point to function-local memory), we can't meaningfully tail-call.
    if (OrigArg.Flags[0].isSRet() && isa<Instruction>(&Arg))
      CanBeTailCalled = false;

    Info.OrigArgs.push_back(OrigArg);
    ++i;
  }

  // Try looking through a bitcast from one function type to another.
  // Commonly happens with calls to objc_msgSend().
  const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();

  if (const Function *F = dyn_cast<Function>(CalleeV)) {
    if (F->hasFnAttribute(Attribute::NonLazyBind)) {
      LLT Ty = getLLTForType(*F->getType(), DL);
      Register Reg = MIRBuilder.buildGlobalValue(Ty, F).getReg(0);
      Info.Callee = MachineOperand::CreateReg(Reg, false);
    } else {
      Info.Callee = MachineOperand::CreateGA(F, 0);
    }
  } else if (isa<GlobalIFunc>(CalleeV) || isa<GlobalAlias>(CalleeV)) {
    // IR IFuncs and Aliases can't be forward declared (only defined), so the
    // callee must be in the same TU and therefore we can direct-call it
    // without worrying about it being out of range.
    Info.Callee = MachineOperand::CreateGA(cast<GlobalValue>(CalleeV), 0);
  } else
    Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  Register ReturnHintAlignReg;
  Align ReturnHintAlign;

  Info.OrigRet = ArgInfo{ResRegs, RetTy, 0, getAttributesForReturn(CB)};

  if (!Info.OrigRet.Ty->isVoidTy()) {
    setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);

    if (MaybeAlign Alignment = CB.getRetAlign()) {
      if (*Alignment > Align(1)) {
        ReturnHintAlignReg = MRI.cloneVirtualRegister(ResRegs[0]);
        Info.OrigRet.Regs[0] = ReturnHintAlignReg;
        ReturnHintAlign = *Alignment;
      }
    }
  }

  auto Bundle = CB.getOperandBundle(LLVMContext::OB_kcfi);
  if (Bundle && CB.isIndirectCall()) {
    Info.CFIType = cast<ConstantInt>(Bundle->Inputs[0]);
    assert(Info.CFIType->getType()->isIntegerTy(32) && "Invalid CFI type");
  }

  Info.CB = &CB;
  Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
  Info.CallConv = CallConv;
  Info.SwiftErrorVReg = SwiftErrorVReg;
  Info.PAI = PAI;
  Info.ConvergenceCtrlToken = ConvergenceCtrlToken;
  Info.IsMustTailCall = CB.isMustTailCall();
  Info.IsTailCall = CanBeTailCalled;
  Info.IsVarArg = IsVarArg;
  if (lowerCall(MIRBuilder, Info))
    return false;

  if (ReturnHintAlignReg && !Info.LoweredTailCall) {
    MIRBuilder.buildAssertAlign(ResRegs[0], ReturnHintAlignReg,
                                ReturnHintAlign);
  }

  return true;
}

template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  auto &Flags = Arg.Flags[0];
  const AttributeList &Attrs = FuncInfo.getAttributes();
  addArgFlagsFromAttributes(Flags, Attrs, OpIdx);

  PointerType *PtrTy = dyn_cast<PointerType>(Arg.Ty->getScalarType());
  if (PtrTy) {
    Flags.setPointer();
    Flags.setPointerAddrSpace(PtrTy->getPointerAddressSpace());
  }

  Align MemAlign = DL.getABITypeAlign(Arg.Ty);
  if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated() ||
      Flags.isByRef()) {
    assert(OpIdx >= AttributeList::FirstArgIndex);
    unsigned ParamIdx = OpIdx - AttributeList::FirstArgIndex;

    Type *ElementTy = FuncInfo.getParamByValType(ParamIdx);
    if (!ElementTy)
      ElementTy = FuncInfo.getParamByRefType(ParamIdx);
    if (!ElementTy)
      ElementTy = FuncInfo.getParamInAllocaType(ParamIdx);
    if (!ElementTy)
      ElementTy = FuncInfo.getParamPreallocatedType(ParamIdx);

    assert(ElementTy && "Must have byval, inalloca or preallocated type");

    uint64_t MemSize = DL.getTypeAllocSize(ElementTy);
    if (Flags.isByRef())
      Flags.setByRefSize(MemSize);
    else
      Flags.setByValSize(MemSize);

    // For ByVal, alignment should be passed from FE.  BE will guess if this
    // info is not there but there are cases it cannot get right.
    if (auto ParamAlign = FuncInfo.getParamStackAlign(ParamIdx))
      MemAlign = *ParamAlign;
    else if ((ParamAlign = FuncInfo.getParamAlign(ParamIdx)))
      MemAlign = *ParamAlign;
    else
      MemAlign = getTLI()->getByValTypeAlignment(ElementTy, DL);
  } else if (OpIdx >= AttributeList::FirstArgIndex) {
    if (auto ParamAlign =
            FuncInfo.getParamStackAlign(OpIdx - AttributeList::FirstArgIndex))
      MemAlign = *ParamAlign;
  }
  Flags.setMemAlign(MemAlign);
  Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));

  // Don't try to use the returned attribute if the argument is marked as
  // swiftself, since it won't be passed in x0.
  if (Flags.isSwiftSelf())
    Flags.setReturned(false);
}

template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallBase &FuncInfo) const;

void CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
                                     SmallVectorImpl<ArgInfo> &SplitArgs,
                                     const DataLayout &DL,
                                     CallingConv::ID CallConv,
                                     SmallVectorImpl<uint64_t> *Offsets) const {
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  SmallVector<EVT, 4> SplitVTs;
  ComputeValueVTs(*TLI, DL, OrigArg.Ty, SplitVTs, nullptr, Offsets,
                  0);

  if (SplitVTs.size() == 0)
    return;

  if (SplitVTs.size() == 1) {
    // No splitting to do, but we want to replace the original type (e.g. [1 x
    // double] -> double).
    SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
                           OrigArg.OrigArgIndex, OrigArg.Flags[0],
                           OrigArg.OrigValue);
    return;
  }

  // Create one ArgInfo for each virtual register in the original ArgInfo.
  assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");

  bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
      OrigArg.Ty, CallConv, false, DL);
  for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
    Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
    SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.OrigArgIndex,
                           OrigArg.Flags[0]);
    if (NeedsRegBlock)
      SplitArgs.back().Flags[0].setInConsecutiveRegs();
  }

  SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
}

/// Pack values \p SrcRegs to cover the vector type result \p DstRegs.
static MachineInstrBuilder
mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
                            ArrayRef<Register> SrcRegs) {
  MachineRegisterInfo &MRI = *B.getMRI();
  LLT LLTy = MRI.getType(DstRegs[0]);
  LLT PartLLT = MRI.getType(SrcRegs[0]);

  // Deal with v3s16 split into v2s16
  LLT LCMTy = getCoverTy(LLTy, PartLLT);
  if (LCMTy == LLTy) {
    // Common case where no padding is needed.
    assert(DstRegs.size() == 1);
    return B.buildConcatVectors(DstRegs[0], SrcRegs);
  }

  // We need to create an unmerge to the result registers, which may require
  // widening the original value.
  Register UnmergeSrcReg;
  if (LCMTy != PartLLT) {
    assert(DstRegs.size() == 1);
    return B.buildDeleteTrailingVectorElements(
        DstRegs[0], B.buildMergeLikeInstr(LCMTy, SrcRegs));
  } else {
    // We don't need to widen anything if we're extracting a scalar which was
    // promoted to a vector e.g. s8 -> v4s8 -> s8
    assert(SrcRegs.size() == 1);
    UnmergeSrcReg = SrcRegs[0];
  }

  int NumDst = LCMTy.getSizeInBits() / LLTy.getSizeInBits();

  SmallVector<Register, 8> PadDstRegs(NumDst);
  llvm::copy(DstRegs, PadDstRegs.begin());

  // Create the excess dead defs for the unmerge.
  for (int I = DstRegs.size(); I != NumDst; ++I)
    PadDstRegs[I] = MRI.createGenericVirtualRegister(LLTy);

  if (PadDstRegs.size() == 1)
    return B.buildDeleteTrailingVectorElements(DstRegs[0], UnmergeSrcReg);
  return B.buildUnmerge(PadDstRegs, UnmergeSrcReg);
}

/// Create a sequence of instructions to combine pieces split into register
/// typed values to the original IR value. \p OrigRegs contains the destination
/// value registers of type \p LLTy, and \p Regs contains the legalized pieces
/// with type \p PartLLT. This is used for incoming values (physregs to vregs).
static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
                              ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT,
                              const ISD::ArgFlagsTy Flags) {
  MachineRegisterInfo &MRI = *B.getMRI();

  if (PartLLT == LLTy) {
    // We should have avoided introducing a new virtual register, and just
    // directly assigned here.
    assert(OrigRegs[0] == Regs[0]);
    return;
  }

  if (PartLLT.getSizeInBits() == LLTy.getSizeInBits() &&
      Regs.size() == 1) {
    B.buildBitcast(OrigRegs[0], Regs[0]);
    return;
  }

  // A vector PartLLT needs extending to LLTy's element size.
  // E.g. <2 x s64> = G_SEXT <2 x s32>.
  if (PartLLT.isVector() == LLTy.isVector() &&
      PartLLT.getScalarSizeInBits() > LLTy.getScalarSizeInBits() &&
      (!PartLLT.isVector() ||
       PartLLT.getElementCount() == LLTy.getElementCount()) &&
      OrigRegs.size() == 1 && Regs.size() == 1) {
    Register SrcReg = Regs[0];

    LLT LocTy = MRI.getType(SrcReg);

    if (Flags.isSExt()) {
      SrcReg = B.buildAssertSExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
                   .getReg(0);
    } else if (Flags.isZExt()) {
      SrcReg = B.buildAssertZExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
                   .getReg(0);
    }

    // Sometimes pointers are passed zero extended.
    LLT OrigTy = MRI.getType(OrigRegs[0]);
    if (OrigTy.isPointer()) {
      LLT IntPtrTy = LLT::scalar(OrigTy.getSizeInBits());
      B.buildIntToPtr(OrigRegs[0], B.buildTrunc(IntPtrTy, SrcReg));
      return;
    }

    B.buildTrunc(OrigRegs[0], SrcReg);
    return;
  }

  if (!LLTy.isVector() && !PartLLT.isVector()) {
    assert(OrigRegs.size() == 1);
    LLT OrigTy = MRI.getType(OrigRegs[0]);

    unsigned SrcSize = PartLLT.getSizeInBits().getFixedValue() * Regs.size();
    if (SrcSize == OrigTy.getSizeInBits())
      B.buildMergeValues(OrigRegs[0], Regs);
    else {
      auto Widened = B.buildMergeLikeInstr(LLT::scalar(SrcSize), Regs);
      B.buildTrunc(OrigRegs[0], Widened);
    }

    return;
  }

  if (PartLLT.isVector()) {
    assert(OrigRegs.size() == 1);
    SmallVector<Register> CastRegs(Regs.begin(), Regs.end());

    // If PartLLT is a mismatched vector in both number of elements and element
    // size, e.g. PartLLT == v2s64 and LLTy is v3s32, then first coerce it to
    // have the same elt type, i.e. v4s32.
    // TODO: Extend this coersion to element multiples other than just 2.
    if (TypeSize::isKnownGT(PartLLT.getSizeInBits(), LLTy.getSizeInBits()) &&
        PartLLT.getScalarSizeInBits() == LLTy.getScalarSizeInBits() * 2 &&
        Regs.size() == 1) {
      LLT NewTy = PartLLT.changeElementType(LLTy.getElementType())
                      .changeElementCount(PartLLT.getElementCount() * 2);
      CastRegs[0] = B.buildBitcast(NewTy, Regs[0]).getReg(0);
      PartLLT = NewTy;
    }

    if (LLTy.getScalarType() == PartLLT.getElementType()) {
      mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
    } else {
      unsigned I = 0;
      LLT GCDTy = getGCDType(LLTy, PartLLT);

      // We are both splitting a vector, and bitcasting its element types. Cast
      // the source pieces into GCD-typed pieces, and merge those.
      for (Register SrcReg : CastRegs)
        CastRegs[I++] = B.buildBitcast(GCDTy, SrcReg).getReg(0);
      mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
    }

    return;
  }

  assert(LLTy.isVector() && !PartLLT.isVector());

  LLT DstEltTy = LLTy.getElementType();

  // Pointer information was discarded. We'll need to coerce some register
  // types to avoid violating type constraints.
  LLT RealDstEltTy = MRI.getType(OrigRegs[0]).getElementType();

  assert(DstEltTy.getSizeInBits() == RealDstEltTy.getSizeInBits());

  if (DstEltTy == PartLLT) {
    // Vector was trivially scalarized.

    if (RealDstEltTy.isPointer()) {
      for (Register Reg : Regs)
        MRI.setType(Reg, RealDstEltTy);
    }

    B.buildBuildVector(OrigRegs[0], Regs);
  } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
    // Deal with vector with 64-bit elements decomposed to 32-bit
    // registers. Need to create intermediate 64-bit elements.
    SmallVector<Register, 8> EltMerges;
    int PartsPerElt =
        divideCeil(DstEltTy.getSizeInBits(), PartLLT.getSizeInBits());
    LLT ExtendedPartTy = LLT::scalar(PartLLT.getSizeInBits() * PartsPerElt);

    for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
      auto Merge =
          B.buildMergeLikeInstr(ExtendedPartTy, Regs.take_front(PartsPerElt));
      if (ExtendedPartTy.getSizeInBits() > RealDstEltTy.getSizeInBits())
        Merge = B.buildTrunc(RealDstEltTy, Merge);
      // Force the type in case of pointers, so the pieces all use the
      // same type.
      MRI.setType(Merge.getReg(0), RealDstEltTy);
      EltMerges.push_back(Merge.getReg(0));
      Regs = Regs.drop_front(PartsPerElt);
    }

    B.buildBuildVector(OrigRegs[0], EltMerges);
  } else {
    // Vector was split, and elements promoted to a wider type.
    // FIXME: Should handle floating point promotions.
    unsigned NumElts = LLTy.getNumElements();
    LLT BVType = LLT::fixed_vector(NumElts, PartLLT);

    Register BuildVec;
    if (NumElts == Regs.size())
      BuildVec = B.buildBuildVector(BVType, Regs).getReg(0);
    else {
      // Vector elements are packed in the inputs.
      // e.g. we have a <4 x s16> but 2 x s32 in regs.
      assert(NumElts > Regs.size());
      LLT SrcEltTy = MRI.getType(Regs[0]);

      LLT OriginalEltTy = MRI.getType(OrigRegs[0]).getElementType();

      // Input registers contain packed elements.
      // Determine how many elements per reg.
      assert((SrcEltTy.getSizeInBits() % OriginalEltTy.getSizeInBits()) == 0);
      unsigned EltPerReg =
          (SrcEltTy.getSizeInBits() / OriginalEltTy.getSizeInBits());

      SmallVector<Register, 0> BVRegs;
      BVRegs.reserve(Regs.size() * EltPerReg);
      for (Register R : Regs) {
        auto Unmerge = B.buildUnmerge(OriginalEltTy, R);
        for (unsigned K = 0; K < EltPerReg; ++K)
          BVRegs.push_back(B.buildAnyExt(PartLLT, Unmerge.getReg(K)).getReg(0));
      }

      // We may have some more elements in BVRegs, e.g. if we have 2 s32 pieces
      // for a <3 x s16> vector. We should have less than EltPerReg extra items.
      if (BVRegs.size() > NumElts) {
        assert((BVRegs.size() - NumElts) < EltPerReg);
        BVRegs.truncate(NumElts);
      }
      BuildVec = B.buildBuildVector(BVType, BVRegs).getReg(0);
    }
    B.buildTrunc(OrigRegs[0], BuildVec);
  }
}

/// Create a sequence of instructions to expand the value in \p SrcReg (of type
/// \p SrcTy) to the types in \p DstRegs (of type \p PartTy). \p ExtendOp
/// should contain the extend operation, and the layout is expanding out from
/// the scalar. This is used for outgoing values (vregs to physregs).
static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
                            Register SrcReg, LLT SrcTy, LLT PartTy,
                            unsigned ExtendOp = TargetOpcode::G_ANYEXT) {
  // We could just insert a regular copy, but this is unreachable at the moment.
  assert(SrcTy != PartTy && "identical part types shouldn't reach here");

  const TypeSize PartSize = PartTy.getSizeInBits();

  if (PartTy.isVector() == SrcTy.isVector() &&
      PartTy.getScalarSizeInBits() > SrcTy.getScalarSizeInBits()) {
    assert(DstRegs.size() == 1);
    B.buildInstr(ExtendOp, {DstRegs[0]}, {SrcReg});
    return;
  }

  if (SrcTy.isVector() && !PartTy.isVector() &&
      TypeSize::isKnownGT(PartSize, SrcTy.getElementType().getSizeInBits())) {
    // Vector was scalarized, and the elements extended.
    auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(), SrcReg);
    for (int i = 0, e = DstRegs.size(); i != e; ++i)
      B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
    return;
  }

  if (SrcTy.isVector() && PartTy.isVector() &&
      PartTy.getSizeInBits() == SrcTy.getSizeInBits() &&
      ElementCount::isKnownLT(SrcTy.getElementCount(),
                              PartTy.getElementCount())) {
    // A coercion like: v2f32 -> v4f32 or nxv2f32 -> nxv4f32
    Register DstReg = DstRegs.front();
    B.buildPadVectorWithUndefElements(DstReg, SrcReg);
    return;
  }

  LLT GCDTy = getGCDType(SrcTy, PartTy);
  if (GCDTy == PartTy) {
    // If this already evenly divisible, we can create a simple unmerge.
    B.buildUnmerge(DstRegs, SrcReg);
    return;
  }

  if (SrcTy.isVector() && !PartTy.isVector() &&
      SrcTy.getScalarSizeInBits() > PartTy.getSizeInBits()) {
    LLT ExtTy =
        LLT::vector(SrcTy.getElementCount(),
                    LLT::scalar(PartTy.getScalarSizeInBits() * DstRegs.size() /
                                SrcTy.getNumElements()));
    auto Ext = B.buildAnyExt(ExtTy, SrcReg);
    B.buildUnmerge(DstRegs, Ext);
    return;
  }

  MachineRegisterInfo &MRI = *B.getMRI();
  LLT DstTy = MRI.getType(DstRegs[0]);
  LLT LCMTy = getCoverTy(SrcTy, PartTy);

  if (PartTy.isVector() && LCMTy == PartTy) {
    assert(DstRegs.size() == 1);
    B.buildPadVectorWithUndefElements(DstRegs[0], SrcReg);
    return;
  }

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();
  unsigned CoveringSize = LCMTy.getSizeInBits();

  Register UnmergeSrc = SrcReg;

  if (!LCMTy.isVector() && CoveringSize != SrcSize) {
    // For scalars, it's common to be able to use a simple extension.
    if (SrcTy.isScalar() && DstTy.isScalar()) {
      CoveringSize = alignTo(SrcSize, DstSize);
      LLT CoverTy = LLT::scalar(CoveringSize);
      UnmergeSrc = B.buildInstr(ExtendOp, {CoverTy}, {SrcReg}).getReg(0);
    } else {
      // Widen to the common type.
      // FIXME: This should respect the extend type
      Register Undef = B.buildUndef(SrcTy).getReg(0);
      SmallVector<Register, 8> MergeParts(1, SrcReg);
      for (unsigned Size = SrcSize; Size != CoveringSize; Size += SrcSize)
        MergeParts.push_back(Undef);
      UnmergeSrc = B.buildMergeLikeInstr(LCMTy, MergeParts).getReg(0);
    }
  }

  if (LCMTy.isVector() && CoveringSize != SrcSize)
    UnmergeSrc = B.buildPadVectorWithUndefElements(LCMTy, SrcReg).getReg(0);

  B.buildUnmerge(DstRegs, UnmergeSrc);
}

653

662

663 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, F.getContext());

665 return false;

666

667 return handleAssignments(Handler, Args, CCInfo, ArgLocs, MIRBuilder,

668 ThisReturnRegs);

669}

670

672 if (Flags.isSExt())

673 return TargetOpcode::G_SEXT;

674 if (Flags.isZExt())

675 return TargetOpcode::G_ZEXT;

676 return TargetOpcode::G_ANYEXT;

677}

bool CallLowering::determineAssignments(ValueAssigner &Assigner,
                                        SmallVectorImpl<ArgInfo> &Args,
                                        CCState &CCInfo) const {
  LLVMContext &Ctx = CCInfo.getContext();
  const CallingConv::ID CallConv = CCInfo.getCallingConv();

  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    EVT CurVT = EVT::getEVT(Args[i].Ty);

    MVT NewVT = TLI->getRegisterTypeForCallingConv(Ctx, CallConv, CurVT);

    // If we need to split the type over multiple regs, check it's a scenario
    // we currently support.
    unsigned NumParts =
        TLI->getNumRegistersForCallingConv(Ctx, CallConv, CurVT);

    if (NumParts == 1) {
      // Try to use the register type if we couldn't assign the VT.
      if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
                             Args[i].Flags[0], CCInfo))
        return false;
      continue;
    }

    // For incoming arguments (physregs to vregs), we could have values in
    // physregs (or memlocs) which we want to extract and copy to vregs.
    // During this, we might have to deal with the LLT being split across
    // multiple regs, so we have to record this information for later.
    //
    // If we have outgoing args, then we have the opposite case. We have a
    // vreg with an LLT which we want to assign to a physical location, and
    // we might have to record that the value has to be split later.

    // We're handling an incoming arg which is split over multiple regs.
    // E.g. passing an s128 on AArch64.
    ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
    Args[i].Flags.clear();

    for (unsigned Part = 0; Part < NumParts; ++Part) {
      ISD::ArgFlagsTy Flags = OrigFlags;
      if (Part == 0) {
        Flags.setSplit();
      } else {
        Flags.setOrigAlign(Align(1));
        if (Part == NumParts - 1)
          Flags.setSplitEnd();
      }

      Args[i].Flags.push_back(Flags);
      if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
                             Args[i].Flags[Part], CCInfo)) {
        // Still couldn't assign this smaller part type for some reason.
        return false;
      }
    }
  }

  return true;
}

bool CallLowering::handleAssignments(ValueHandler &Handler,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     CCState &CCInfo,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     MachineIRBuilder &MIRBuilder,
                                     ArrayRef<Register> ThisReturnRegs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getDataLayout();

  const unsigned NumArgs = Args.size();

  // Stores thunks for outgoing register assignments. This is used so we can
  // write vreg -> physreg copies last (after any stack stores a tail call may
  // need), keeping the registers free for as long as possible.
  SmallVector<std::function<void()>> DelayedOutgoingRegAssignments;

  for (unsigned i = 0, j = 0; i != NumArgs; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");
    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      std::function<void()> Thunk;
      unsigned NumArgRegs = Handler.assignCustomValue(
          Args[i], ArrayRef(ArgLocs).slice(j), &Thunk);
      if (Thunk)
        DelayedOutgoingRegAssignments.emplace_back(Thunk);
      if (!NumArgRegs)
        return false;
      j += (NumArgRegs - 1);
      continue;
    }

    auto AllocaAddressSpace = MF.getDataLayout().getAllocaAddrSpace();

    const MVT ValVT = VA.getValVT();
    const MVT LocVT = VA.getLocVT();

    const LLT LocTy(LocVT);
    const LLT ValTy(ValVT);
    const LLT NewLLT = Handler.isIncomingArgumentHandler() ? LocTy : ValTy;
    const EVT OrigVT = EVT::getEVT(Args[i].Ty);
    const LLT OrigTy = getLLTForType(*Args[i].Ty, DL);
    const LLT PointerTy = LLT::pointer(
        AllocaAddressSpace, DL.getPointerSizeInBits(AllocaAddressSpace));

    // Expected to be multiple regs for a single incoming arg.
    // There should be Regs.size() ArgLocs per argument.
    // This should be the same as getNumRegistersForCallingConv().
    const unsigned NumParts = Args[i].Flags.size();

    // Now split the virtual register into multiple.
    Args[i].OrigRegs.assign(Args[i].Regs.begin(), Args[i].Regs.end());

    if (NumParts != 1 || NewLLT != OrigTy) {
      // If we can't directly assign the register, we need one or more
      // intermediate values.
      Args[i].Regs.resize(NumParts);

      // When we have indirect parameter passing we are receiving a pointer
      // that points to the actual value, so we need one "temporary"
      // pointer-typed register.
      if (VA.getLocInfo() == CCValAssign::Indirect &&
          Handler.isIncomingArgumentHandler()) {
        Args[i].Regs[0] = MRI.createGenericVirtualRegister(PointerTy);
      } else {
        // For each split register, create and assign a vreg that will store
        // the incoming component of the larger value. These will later be
        // merged to form the final vreg.
        for (unsigned Part = 0; Part < NumParts; ++Part)
          Args[i].Regs[Part] = MRI.createGenericVirtualRegister(NewLLT);
      }
    }

    assert((j + (NumParts - 1)) < ArgLocs.size() &&
           "Too many regs for number of args");

    // Coerce into outgoing value types before register assignment.
    if (!Handler.isIncomingArgumentHandler() && OrigTy != ValTy &&
        VA.getLocInfo() != CCValAssign::Indirect) {
      assert(Args[i].OrigRegs.size() == 1);
      buildCopyToRegs(MIRBuilder, Args[i].Regs, Args[i].OrigRegs[0], OrigTy,
                      ValTy, extendOpFromFlags(Args[i].Flags[0]));
    }

    bool IndirectParameterPassingHandled = false;
    bool BigEndianPartOrdering = TLI->hasBigEndianPartOrdering(OrigVT, DL);
    for (unsigned Part = 0; Part < NumParts; ++Part) {
      assert((VA.getLocInfo() != CCValAssign::Indirect || Part == 0) &&
             "Only the first parameter should be processed when "
             "handling indirect passing!");
      Register ArgReg = Args[i].Regs[Part];
      // There should be Regs.size() ArgLocs per argument.
      unsigned Idx = BigEndianPartOrdering ? NumParts - 1 - Part : Part;
      CCValAssign &VA = ArgLocs[j + Idx];
      const ISD::ArgFlagsTy Flags = Args[i].Flags[Part];

      // We found an indirect parameter passing, and we have an
      // OutgoingValueHandler as our handler (so we are at the call site or the
      // return value). In this case, start the construction of the following
      // GMIR, that is responsible for the preparation of indirect parameter
      // passing:
      //
      // %1(indirectly passed type) = The value to pass
      // %3(pointer) = G_FRAME_INDEX %stack.0
      // G_STORE %1, %3 :: (store (s128), align 8)
      //
      // After this GMIR, the remaining part of the loop body decides how to
      // get the value to the caller and we break out of the loop.
      if (VA.getLocInfo() == CCValAssign::Indirect &&
          !Handler.isIncomingArgumentHandler()) {
        Align AlignmentForStored = DL.getPrefTypeAlign(Args[i].Ty);
        MachineFrameInfo &MFI = MF.getFrameInfo();
        // Get some space on the stack for the value, so later we can pass it
        // as a reference.
        int FrameIdx = MFI.CreateStackObject(OrigTy.getScalarSizeInBits(),
                                             AlignmentForStored, false);

        Register PointerToStackReg =
            MIRBuilder.buildFrameIndex(PointerTy, FrameIdx).getReg(0);
        MachinePointerInfo StackPointerMPO =
            MachinePointerInfo::getFixedStack(MF, FrameIdx);
        // Store the value in the previously created stack space.
        MIRBuilder.buildStore(Args[i].OrigRegs[Part], PointerToStackReg,
                              StackPointerMPO,
                              inferAlignFromPtrInfo(MF, StackPointerMPO));

        ArgReg = PointerToStackReg;
        IndirectParameterPassingHandled = true;
      }

      if (VA.isMemLoc() && !Flags.isByVal()) {
        // Individual pieces may have been spilled to the stack and others
        // passed in registers.

        // The memory location is in most cases the size of the type, but it
        // may be smaller, e.g. for a byte-extended value, or a pointer when
        // the value is passed indirectly.
        LLT MemTy = Handler.getStackValueStoreType(DL, VA, Flags);
        if (VA.getLocInfo() == CCValAssign::Indirect)
          MemTy = PointerTy;

        MachinePointerInfo MPO;
        Register StackAddr = Handler.getStackAddress(
            MemTy.getSizeInBytes(), VA.getLocMemOffset(), MPO, Flags);
        if (Handler.isIncomingArgumentHandler())
          Handler.assignValueToAddress(ArgReg, StackAddr, MemTy, MPO, VA);
        else
          Handler.assignValueToAddress(Args[i], Part, StackAddr, MemTy, MPO,
                                       VA);
      } else if (VA.isMemLoc() && Flags.isByVal()) {
        assert(Args[i].Regs.size() == 1 && "didn't expect split byval pointer");

        if (Handler.isIncomingArgumentHandler()) {
          // We just need to copy the frame index value to the pointer.
          MachinePointerInfo MPO;
          Register StackAddr = Handler.getStackAddress(
              Flags.getByValSize(), VA.getLocMemOffset(), MPO, Flags);
          MIRBuilder.buildCopy(Args[i].Regs[0], StackAddr);
        } else {
          // For outgoing byval arguments, insert the implicit copy byval
          // implies, such that writes in the callee do not modify the caller's
          // value.
          uint64_t MemSize = Flags.getByValSize();
          int64_t Offset = VA.getLocMemOffset();

          MachinePointerInfo DstMPO;
          Register StackAddr =
              Handler.getStackAddress(MemSize, Offset, DstMPO, Flags);

          MachinePointerInfo SrcMPO(Args[i].OrigValue);
          if (!Args[i].OrigValue) {
            // We still need to accurately track the stack address space if we
            // don't know the underlying value.
            const LLT PtrTy = MRI.getType(StackAddr);
            SrcMPO = MachinePointerInfo(PtrTy.getAddressSpace());
          }

          Align DstAlign = std::max(Flags.getNonZeroByValAlign(),
                                    inferAlignFromPtrInfo(MF, DstMPO));

          Align SrcAlign = std::max(Flags.getNonZeroByValAlign(),
                                    inferAlignFromPtrInfo(MF, SrcMPO));

          Handler.copyArgumentMemory(Args[i], StackAddr, Args[i].Regs[0],
                                     DstMPO, DstAlign, SrcMPO, SrcAlign,
                                     MemSize, VA);
        }
      } else if (i == 0 && !ThisReturnRegs.empty() &&
                 Handler.isIncomingArgumentHandler() &&
                 isTypeIsValidForThisReturn(ValVT)) {
        Handler.assignValueToReg(ArgReg, ThisReturnRegs[0], VA);
      } else if (Handler.isIncomingArgumentHandler()) {
        Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
      } else {
        DelayedOutgoingRegAssignments.emplace_back([=, &Handler]() {
          Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
        });
      }

      // Finish the handling of indirect passing on the receiving side: the
      // temporary register holds the address, so load the actual value.
      if (VA.getLocInfo() == CCValAssign::Indirect &&
          Handler.isIncomingArgumentHandler()) {
        Align Alignment = DL.getABITypeAlign(Args[i].Ty);
        MachinePointerInfo MPO = MachinePointerInfo::getUnknownStack(MF);

        // Since we are doing indirect parameter passing, the ArgReg at this
        // point contains the address of the value which we later load with a
        // G_LOAD to get the actual value.
        MIRBuilder.buildLoad(Args[i].OrigRegs[0], Args[i].Regs[0], MPO,
                             Alignment);

        IndirectParameterPassingHandled = true;
      }

      if (IndirectParameterPassingHandled)
        break;
    }

    // Now that all pieces have been assigned, re-pack the register typed
    // values into the original value typed registers. This is only necessary
    // for incoming values.
    if (Handler.isIncomingArgumentHandler() && OrigVT != LocVT &&
        !IndirectParameterPassingHandled) {
      // Merge the split registers into the expected larger result vregs of
      // the original call.
      buildCopyFromRegs(MIRBuilder, Args[i].OrigRegs, Args[i].Regs, OrigTy,
                        LocTy, Args[i].Flags[0]);
    }

    j += NumParts - 1;
  }
  for (auto &Fn : DelayedOutgoingRegAssignments)
    Fn();

  return true;
}

void CallLowering::insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy,
                                   ArrayRef<Register> VRegs, Register DemoteReg,
                                   int FI) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const DataLayout &DL = MF.getDataLayout();

  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, nullptr, &Offsets, 0);

  assert(VRegs.size() == SplitVTs.size());

  unsigned NumValues = SplitVTs.size();
  Align BaseAlign = DL.getPrefTypeAlign(RetTy);
  Type *RetPtrTy =
      PointerType::get(RetTy, DL.getAllocaAddrSpace());
  LLT OffsetLLTy = getLLTForType(*DL.getIndexType(RetPtrTy), DL);

  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);

  for (unsigned I = 0; I < NumValues; ++I) {
    Register Addr;
    MIRBuilder.materializeObjectPtrOffset(Addr, DemoteReg, OffsetLLTy,
                                          Offsets[I]);
    auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
                                        MRI.getType(VRegs[I]),
                                        commonAlignment(BaseAlign, Offsets[I]));
    MIRBuilder.buildLoad(VRegs[I], Addr, *MMO);
  }
}

void CallLowering::insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy,
                                    ArrayRef<Register> VRegs,
                                    Register DemoteReg) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const DataLayout &DL = MF.getDataLayout();

  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, nullptr, &Offsets, 0);

  assert(VRegs.size() == SplitVTs.size());

  unsigned NumValues = SplitVTs.size();
  Align BaseAlign = DL.getPrefTypeAlign(RetTy);
  unsigned AS = DL.getAllocaAddrSpace();
  LLT OffsetLLTy = getLLTForType(*DL.getIndexType(RetTy->getContext(), AS), DL);

  MachinePointerInfo PtrInfo(AS);

  for (unsigned I = 0; I < NumValues; ++I) {
    Register Addr;
    MIRBuilder.materializeObjectPtrOffset(Addr, DemoteReg, OffsetLLTy,
                                          Offsets[I]);
    auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                        MRI.getType(VRegs[I]),
                                        commonAlignment(BaseAlign, Offsets[I]));
    MIRBuilder.buildStore(VRegs[I], Addr, *MMO);
  }
}

void CallLowering::insertSRetIncomingArgument(
    const Function &F, SmallVectorImpl<ArgInfo> &SplitArgs, Register &DemoteReg,
    MachineRegisterInfo &MRI, const DataLayout &DL) const {
  unsigned AS = DL.getAllocaAddrSpace();
  DemoteReg = MRI.createGenericVirtualRegister(
      LLT::pointer(AS, DL.getPointerSizeInBits(AS)));

  Type *PtrTy = PointerType::get(F.getReturnType(), AS);

  SmallVector<EVT, 1> ValueVTs;
  ComputeValueVTs(*TLI, DL, PtrTy, ValueVTs);

  // NOTE: Assume that a pointer won't get split into more than one VT.
  assert(ValueVTs.size() == 1);

  ArgInfo DemoteArg(DemoteReg, ValueVTs[0].getTypeForEVT(PtrTy->getContext()),
                    ArgInfo::NoArgIndex);
  setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, F);
  DemoteArg.Flags[0].setSRet();
  SplitArgs.insert(SplitArgs.begin(), DemoteArg);
}

void CallLowering::insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder,
                                              const CallBase &CB,
                                              CallLoweringInfo &Info) const {
  const DataLayout &DL = MIRBuilder.getDataLayout();
  Type *RetTy = CB.getType();
  unsigned AS = DL.getAllocaAddrSpace();
  LLT FramePtrTy = LLT::pointer(AS, DL.getPointerSizeInBits(AS));

  int FI = MIRBuilder.getMF().getFrameInfo().CreateStackObject(
      DL.getTypeAllocSize(RetTy), DL.getPrefTypeAlign(RetTy), false);

  Register DemoteReg = MIRBuilder.buildFrameIndex(FramePtrTy, FI).getReg(0);
  ArgInfo DemoteArg(DemoteReg, PointerType::get(RetTy, AS),
                    ArgInfo::NoArgIndex);
  setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, CB);
  DemoteArg.Flags[0].setSRet();

  Info.OrigArgs.insert(Info.OrigArgs.begin(), DemoteArg);
  Info.DemoteStackIndex = FI;
  Info.DemoteRegister = DemoteReg;
}

bool CallLowering::checkReturn(CCState &CCInfo,
                               SmallVectorImpl<BaseArgInfo> &Outs,
                               CCAssignFn *Fn) const {
  for (unsigned I = 0, E = Outs.size(); I < E; ++I) {
    MVT VT = MVT::getVT(Outs[I].Ty);
    if (Fn(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], Outs[I].Ty, CCInfo))
      return false;
  }
  return true;
}

void CallLowering::getReturnInfo(CallingConv::ID CallConv, Type *RetTy,
                                 AttributeList Attrs,
                                 SmallVectorImpl<BaseArgInfo> &Outs,
                                 const DataLayout &DL) const {
  LLVMContext &Context = RetTy->getContext();
  ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();

  SmallVector<EVT, 4> SplitVTs;
  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs);
  addArgFlagsFromAttributes(Flags, Attrs, AttributeList::ReturnIndex);

  for (EVT VT : SplitVTs) {
    unsigned NumParts =
        TLI->getNumRegistersForCallingConv(Context, CallConv, VT);
    MVT RegVT = TLI->getRegisterTypeForCallingConv(Context, CallConv, VT);
    Type *PartTy = EVT(RegVT).getTypeForEVT(Context);

    for (unsigned I = 0; I < NumParts; ++I) {
      Outs.emplace_back(PartTy, Flags);
    }
  }
}

bool CallLowering::checkReturnTypeForCallConv(MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  Type *ReturnType = F.getReturnType();
  CallingConv::ID CallConv = F.getCallingConv();

  SmallVector<BaseArgInfo, 4> SplitArgs;
  getReturnInfo(CallConv, ReturnType, F.getAttributes(), SplitArgs,
                MF.getDataLayout());
  return canLowerReturn(MF, CallConv, SplitArgs, F.isVarArg());
}

bool CallLowering::parametersInCSRMatch(
    const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask,
    const SmallVectorImpl<CCValAssign> &OutLocs,
    const SmallVectorImpl<ArgInfo> &OutArgs) const {
  for (unsigned i = 0; i < OutLocs.size(); ++i) {
    const auto &ArgLoc = OutLocs[i];
    // If it's not a register, it's fine.
    if (!ArgLoc.isRegLoc())
      continue;

    MCRegister PhysReg = ArgLoc.getLocReg();

    // Only look at callee-saved registers.
    if (MachineOperand::clobbersPhysReg(CallerPreservedMask, PhysReg))
      continue;

    LLVM_DEBUG(
        dbgs()
        << "... Call has an argument passed in a callee-saved register.\n");

    // Check if it was copied from.
    const ArgInfo &OutInfo = OutArgs[i];

    if (OutInfo.Regs.size() > 1) {
      LLVM_DEBUG(
          dbgs() << "... Cannot handle arguments in multiple registers.\n");
      return false;
    }

    // Check if we copy the register, walking through copies from virtual
    // registers. Note that getDefIgnoringCopies does not ignore copies from
    // physical registers.
    MachineInstr *RegDef = getDefIgnoringCopies(OutInfo.Regs[0], MRI);
    if (!RegDef || RegDef->getOpcode() != TargetOpcode::COPY) {
      LLVM_DEBUG(
          dbgs()
          << "... Parameter was not copied into a VReg, cannot tail call.\n");
      return false;
    }

    // Got a copy. Verify that it's the same as the register we want.
    Register CopyRHS = RegDef->getOperand(1).getReg();
    if (CopyRHS != PhysReg) {
      LLVM_DEBUG(dbgs() << "... Callee-saved register was not copied into "
                           "VReg, cannot tail call.\n");
      return false;
    }
  }

  return true;
}

bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
                                     MachineFunction &MF,
                                     SmallVectorImpl<ArgInfo> &InArgs,
                                     ValueAssigner &CalleeAssigner,
                                     ValueAssigner &CallerAssigner) const {
  const Function &F = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = F.getCallingConv();

  if (CallerCC == CalleeCC)
    return true;

  SmallVector<CCValAssign, 16> ArgLocs1;
  CCState CCInfo1(CalleeCC, Info.IsVarArg, MF, ArgLocs1, F.getContext());
  if (!determineAssignments(CalleeAssigner, InArgs, CCInfo1))
    return false;

  SmallVector<CCValAssign, 16> ArgLocs2;
  CCState CCInfo2(CallerCC, F.isVarArg(), MF, ArgLocs2, F.getContext());
  if (!determineAssignments(CallerAssigner, InArgs, CCInfo2))
    return false;

  // We need the argument locations to match up exactly. If there's more in
  // one than the other, then we are done.
  if (ArgLocs1.size() != ArgLocs2.size())
    return false;

  // Make sure that each location is passed in exactly the same way.
  for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
    const CCValAssign &Loc1 = ArgLocs1[i];
    const CCValAssign &Loc2 = ArgLocs2[i];

    // We need both of them to be the same. So if one is a register and one
    // isn't, we're done.
    if (Loc1.isRegLoc() != Loc2.isRegLoc())
      return false;

    if (Loc1.isRegLoc()) {
      // If they don't have the same register location, we're done.
      if (Loc1.getLocReg() != Loc2.getLocReg())
        return false;

      // They matched, so we can move to the next ArgLoc.
      continue;
    }

    // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
    if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
      return false;
  }

  return true;
}

LLT CallLowering::ValueHandler::getStackValueStoreType(
    const DataLayout &DL, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const {
  const MVT ValVT = VA.getValVT();
  if (ValVT != MVT::iPTR) {
    LLT ValTy(ValVT);

    // We lost the pointeriness going through CCValAssign, so try to restore it
    // based on the flags.
    if (Flags.isPointer()) {
      LLT PtrTy = LLT::pointer(Flags.getPointerAddrSpace(),
                               ValTy.getScalarSizeInBits());
      if (ValVT.isVector())
        return LLT::vector(ValTy.getElementCount(), PtrTy);
      return PtrTy;
    }

    return ValTy;
  }

  unsigned AddrSpace = Flags.getPointerAddrSpace();
  return LLT::pointer(AddrSpace, DL.getPointerSize(AddrSpace));
}

void CallLowering::ValueHandler::copyArgumentMemory(
    const ArgInfo &Arg, Register DstPtr, Register SrcPtr,
    const MachinePointerInfo &DstPtrInfo, Align DstAlign,
    const MachinePointerInfo &SrcPtrInfo, Align SrcAlign, uint64_t MemSize,
    CCValAssign &VA) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineMemOperand *SrcMMO = MF.getMachineMemOperand(
      SrcPtrInfo,
      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable, MemSize,
      SrcAlign);

  MachineMemOperand *DstMMO = MF.getMachineMemOperand(
      DstPtrInfo,
      MachineMemOperand::MOStore | MachineMemOperand::MODereferenceable,
      MemSize, DstAlign);

  const LLT PtrTy = MRI.getType(DstPtr);
  const LLT SizeTy = LLT::scalar(PtrTy.getSizeInBits());

  auto SizeConst = MIRBuilder.buildConstant(SizeTy, MemSize);
  MIRBuilder.buildMemCpy(DstPtr, SrcPtr, SizeConst, *DstMMO, *SrcMMO);
}

Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                    const CCValAssign &VA,
                                                    unsigned MaxSizeBits) {
  LLT LocTy{VA.getLocVT()};
  LLT ValTy{VA.getValVT()};

  if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
    return ValReg;

  if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
    if (MaxSizeBits <= ValTy.getSizeInBits())
      return ValReg;
    LocTy = LLT::scalar(MaxSizeBits);
  }

  const LLT ValRegTy = MRI.getType(ValReg);
  if (ValRegTy.isPointer()) {
    // The x32 ABI wants to zero extend 32-bit pointers to 64-bit registers, so
    // we have to cast to do the extension.
    LLT IntPtrTy = LLT::scalar(ValRegTy.getSizeInBits());
    ValReg = MIRBuilder.buildPtrToInt(IntPtrTy, ValReg).getReg(0);
  }

  switch (VA.getLocInfo()) {
  default:
    break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // FIXME: bitconverting between vector types may or may not be a
    // nop in big-endian situations.
    return ValReg;
  case CCValAssign::AExt: {
    auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
    return MIB.getReg(0);
  }
  case CCValAssign::SExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(NewReg, ValReg);
    return NewReg;
  }
  case CCValAssign::ZExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(NewReg, ValReg);
    return NewReg;
  }
  }
  llvm_unreachable("unable to extend register");
}

void CallLowering::ValueAssigner::anchor() {}

Register CallLowering::IncomingValueHandler::buildExtensionHint(
    const CCValAssign &VA, Register SrcReg, LLT NarrowTy) {
  switch (VA.getLocInfo()) {
  case CCValAssign::LocInfo::ZExt: {
    return MIRBuilder
        .buildAssertZExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
                         NarrowTy.getScalarSizeInBits())
        .getReg(0);
  }
  case CCValAssign::LocInfo::SExt: {
    return MIRBuilder
        .buildAssertSExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
                         NarrowTy.getScalarSizeInBits())
        .getReg(0);
    break;
  }
  default:
    return SrcReg;
  }
}

/// Check if we can use a basic COPY instruction between the two types.
///
/// We're currently building on top of the infrastructure using MVT, which
/// loses pointer information in the CCValAssign. We accept copies from
/// physical registers that have been reported as integers if it's to an
/// equivalent sized pointer LLT.
static bool isCopyCompatibleType(LLT SrcTy, LLT DstTy) {
  if (SrcTy == DstTy)
    return true;

  if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
    return false;

  SrcTy = SrcTy.getScalarType();
  DstTy = DstTy.getScalarType();

  return (SrcTy.isPointer() && DstTy.isScalar()) ||
         (DstTy.isPointer() && SrcTy.isScalar());
}

void CallLowering::IncomingValueHandler::assignValueToReg(
    Register ValVReg, Register PhysReg, const CCValAssign &VA) {
  const MVT LocVT = VA.getLocVT();
  const LLT LocTy(LocVT);
  const LLT RegTy = MRI.getType(ValVReg);

  if (isCopyCompatibleType(RegTy, LocTy)) {
    MIRBuilder.buildCopy(ValVReg, PhysReg);
    return;
  }

  auto Copy = MIRBuilder.buildCopy(LocTy, PhysReg);
  auto Hint = buildExtensionHint(VA, Copy.getReg(0), RegTy);
  MIRBuilder.buildTrunc(ValVReg, Hint);
}


static void addFlagsUsingAttrFn(ISD::ArgFlagsTy &Flags, const std::function< bool(Attribute::AttrKind)> &AttrFn)

Helper function which updates Flags when AttrFn returns true.

Definition CallLowering.cpp:36
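The three wrappers in this file drive the helper with a predicate; a minimal sketch of the call pattern, mirroring getAttributesForArgIdx (Call and ArgIdx are assumed to be in scope):

ISD::ArgFlagsTy Flags;
addFlagsUsingAttrFn(Flags, [&](Attribute::AttrKind Attr) {
  // The predicate decides which IR attributes translate into ABI flags.
  return Call.paramHasAttr(ArgIdx, Attr);
});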

static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef< Register > DstRegs, Register SrcReg, LLT SrcTy, LLT PartTy, unsigned ExtendOp=TargetOpcode::G_ANYEXT)

Create a sequence of instructions to expand the value in SrcReg (of type SrcTy) to the types in DstRe...

Definition CallLowering.cpp:563
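As an illustration of the simplest path (GCDTy == PartTy), an s64 outgoing value split into two s32 parts lowers to a single unmerge. The registers here are hypothetical:

// Emits: %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %val:_(s64)
buildCopyToRegs(MIRBuilder, {Lo, Hi}, Val, LLT::scalar(64), LLT::scalar(32));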

static MachineInstrBuilder mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef< Register > DstRegs, ArrayRef< Register > SrcRegs)

Pack values SrcRegs to cover the vector type result DstRegs.

Definition CallLowering.cpp:332
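For the v3s16-split-into-v2s16 case called out in the implementation, the parts are merged to the covering type and the trailing element then dropped. A hedged sketch of the resulting generic MIR (buildDeleteTrailingVectorElements may emit a slightly different sequence):

// %cover:_(v4s16) = G_CONCAT_VECTORS %p0:_(v2s16), %p1:_(v2s16)
// %e0, %e1, %e2, %e3:_(s16) = G_UNMERGE_VALUES %cover:_(v4s16)
// %res:_(v3s16) = G_BUILD_VECTOR %e0, %e1, %e2
mergeVectorRegsToResultRegs(B, {Res}, {P0, P1});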

static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef< Register > OrigRegs, ArrayRef< Register > Regs, LLT LLTy, LLT PartLLT, const ISD::ArgFlagsTy Flags)

Create a sequence of instructions to combine pieces split into register typed values to the original ...

Definition CallLowering.cpp:378
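This is the merge direction of the split shown above; with hypothetical registers, two incoming s32 parts are re-packed into the original s64 value:

// Emits: %val:_(s64) = G_MERGE_VALUES %lo:_(s32), %hi:_(s32)
buildCopyFromRegs(MIRBuilder, {Val}, {Lo, Hi}, LLT::scalar(64),
                  LLT::scalar(32), ISD::ArgFlagsTy());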

static bool isCopyCompatibleType(LLT SrcTy, LLT DstTy)

Check if we can use a basic COPY instruction between the two types.

Definition CallLowering.cpp:1377
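Worked examples, assuming a 64-bit address space 0 (values are hypothetical):

isCopyCompatibleType(LLT::pointer(0, 64), LLT::scalar(64)); // true: same size, ptr<->int
isCopyCompatibleType(LLT::pointer(0, 64), LLT::scalar(32)); // false: size mismatch
isCopyCompatibleType(LLT::scalar(32), LLT::scalar(32));     // true: identical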

static unsigned extendOpFromFlags(llvm::ISD::ArgFlagsTy Flags)

Definition CallLowering.cpp:671
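The mapping is a simple priority order: signext first, then zeroext, with G_ANYEXT as the fallback for unannotated arguments:

ISD::ArgFlagsTy Flags;
Flags.setZExt();
unsigned Op = extendOpFromFlags(Flags); // TargetOpcode::G_ZEXT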

This file describes how to lower LLVM calls to machine code calls.

Module.h This file contains the declarations for the Module class.

This file declares the MachineIRBuilder class.


This file describes how to lower LLVM code to machine code.

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...

ArrayRef< T > take_front(size_t N=1) const

Return a copy of *this with only the first N elements.

ArrayRef< T > drop_front(size_t N=1) const

Drop the first N elements of the array.

const T & front() const

front - Get the first element.

size_t size() const

size - Get the array size.

bool empty() const

empty - Check if the array is empty.

AttrKind

This enumeration lists the attributes that can be associated with parameters, function results,...

CCState - This class holds information needed while lowering arguments and return values.

CallingConv::ID getCallingConv() const

LLVMContext & getContext() const

CCValAssign - Represent assignment of one arg/retval to a location.

Register getLocReg() const

LocInfo getLocInfo() const

int64_t getLocMemOffset() const

unsigned getValNo() const

Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...

MaybeAlign getRetAlign() const

Extract the alignment of the return value.

std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const

Return an operand bundle by name, if present.

CallingConv::ID getCallingConv() const

LLVM_ABI bool isMustTailCall() const

Tests if this call site must be tail call optimized.

LLVM_ABI bool isIndirectCall() const

Return true if the callsite is an indirect call.

unsigned countOperandBundlesOfType(StringRef Name) const

Return the number of operand bundles with the tag Name attached to this instruction.

Value * getCalledOperand() const

bool isConvergent() const

Determine if the invoke is convergent.

FunctionType * getFunctionType() const

iterator_range< User::op_iterator > args()

Iteration adapter for range-for loops.

AttributeList getAttributes() const

Return the attributes for this call.

LLVM_ABI bool isTailCall() const

Tests if this call site is marked as a tail call.

void insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder, const CallBase &CB, CallLoweringInfo &Info) const

For the call-base described by CB, insert the hidden sret ArgInfo to the OrigArgs field of Info.

Definition CallLowering.cpp:1080

void insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy, ArrayRef< Register > VRegs, Register DemoteReg, int FI) const

Load the returned value from the stack into virtual registers in VRegs.

Definition CallLowering.cpp:995

bool checkReturnTypeForCallConv(MachineFunction &MF) const

Toplevel function to check the return type based on the target calling convention.

Definition CallLowering.cpp:1136

bool handleAssignments(ValueHandler &Handler, SmallVectorImpl< ArgInfo > &Args, CCState &CCState, SmallVectorImpl< CCValAssign > &ArgLocs, MachineIRBuilder &MIRBuilder, ArrayRef< Register > ThisReturnRegs={}) const

Use Handler to insert code to handle the argument/return values represented by Args.

Definition CallLowering.cpp:740
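A hedged sketch of how a target typically reaches this entry point when lowering formal arguments; MyAssigner, MyIncomingHandler and CC_MyTarget are hypothetical target-side names, not part of this file:

SmallVector<ArgInfo, 8> SplitArgs;
for (const ArgInfo &OrigArg : OrigArgs)
  splitToValueTypes(OrigArg, SplitArgs, DL, CallConv); // legalizable pieces

SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, F.getContext());
MyAssigner Assigner(CC_MyTarget);
if (!determineAssignments(Assigner, SplitArgs, CCInfo))
  return false; // some piece could not be assigned

MyIncomingHandler Handler(MIRBuilder, MRI);
return handleAssignments(Handler, SplitArgs, CCInfo, ArgLocs, MIRBuilder);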

bool resultsCompatible(CallLoweringInfo &Info, MachineFunction &MF, SmallVectorImpl< ArgInfo > &InArgs, ValueAssigner &CalleeAssigner, ValueAssigner &CallerAssigner) const

Definition CallLowering.cpp:1199

void splitToValueTypes(const ArgInfo &OrigArgInfo, SmallVectorImpl< ArgInfo > &SplitArgs, const DataLayout &DL, CallingConv::ID CallConv, SmallVectorImpl< uint64_t > *Offsets=nullptr) const

Break OrigArgInfo into one or more pieces the calling convention can process, returned in SplitArgs.

Definition CallLowering.cpp:291

virtual bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv, SmallVectorImpl< BaseArgInfo > &Outs, bool IsVarArg) const

This hook must be implemented to check whether the return values described by Outs can fit into the r...

virtual bool isTypeIsValidForThisReturn(EVT Ty) const

For targets which support the "returned" parameter attribute, returns true if the given type is a val...

void insertSRetIncomingArgument(const Function &F, SmallVectorImpl< ArgInfo > &SplitArgs, Register &DemoteReg, MachineRegisterInfo &MRI, const DataLayout &DL) const

Insert the hidden sret ArgInfo to the beginning of SplitArgs.

Definition CallLowering.cpp:1058
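The demotion this (and the outgoing variant) implements mirrors the usual IR-level rewrite for a return value too large for the return registers; the types here are illustrative:

// %struct.Big = type { [16 x i64] }
// declare %struct.Big @f()                  ; does not fit in return regs...
// declare void @f(ptr sret(%struct.Big))    ; ...so it is returned through a
//                                           ; hidden pointer argument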

ISD::ArgFlagsTy getAttributesForArgIdx(const CallBase &Call, unsigned ArgIdx) const

Definition CallLowering.cpp:67

bool determineAndHandleAssignments(ValueHandler &Handler, ValueAssigner &Assigner, SmallVectorImpl< ArgInfo > &Args, MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv, bool IsVarArg, ArrayRef< Register > ThisReturnRegs={}) const

Invoke ValueAssigner::assignArg on each of the given Args and then use Handler to move them to the as...

Definition CallLowering.cpp:654
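This wrapper collapses the assign-then-handle steps from the sketch above into one call; the handler and assigner classes are again hypothetical target code:

MyIncomingHandler Handler(MIRBuilder, MRI);
MyAssigner Assigner(CC_MyTarget);
if (!determineAndHandleAssignments(Handler, Assigner, SplitArgs, MIRBuilder,
                                   F.getCallingConv(), F.isVarArg()))
  return false;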

void insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy, ArrayRef< Register > VRegs, Register DemoteReg) const

Store the return value given by VRegs into stack starting at the offset specified in DemoteReg.

Definition CallLowering.cpp:1027

void addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags, const AttributeList &Attrs, unsigned OpIdx) const

Adds flags to Flags based off of the attributes in Attrs.

Definition CallLowering.cpp:85

bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< ArgInfo > &OutVals) const

Check whether parameters to a call that are passed in callee saved registers are the same as from the...

Definition CallLowering.cpp:1147

void getReturnInfo(CallingConv::ID CallConv, Type *RetTy, AttributeList Attrs, SmallVectorImpl< BaseArgInfo > &Outs, const DataLayout &DL) const

Get the type and the ArgFlags for the split components of RetTy as returned by ComputeValueVTs.

Definition CallLowering.cpp:1113

bool determineAssignments(ValueAssigner &Assigner, SmallVectorImpl< ArgInfo > &Args, CCState &CCInfo) const

Analyze the argument list in Args, using Assigner to populate CCInfo.

Definition CallLowering.cpp:679
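For a split argument the per-part flags follow a fixed shape; e.g. an s128 passed as two 64-bit parts on a 64-bit target ends up with:

// Part 0: Flags.isSplit()                    -- marks the start of the group
// Part 1: OrigAlign == 1, Flags.isSplitEnd() -- marks the last part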

bool checkReturn(CCState &CCInfo, SmallVectorImpl< BaseArgInfo > &Outs, CCAssignFn *Fn) const

Definition CallLowering.cpp:1102

const TargetLowering * getTLI() const

Getter for generic TargetLowering class.

virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info) const

This hook must be implemented to lower the given call instruction, including argument and return valu...

void setArgFlags(ArgInfo &Arg, unsigned OpIdx, const DataLayout &DL, const FuncInfoTy &FuncInfo) const

Definition CallLowering.cpp:224

ISD::ArgFlagsTy getAttributesForReturn(const CallBase &Call) const

Definition CallLowering.cpp:77

A parsed version of the target data layout string in and methods for querying it.

unsigned getAllocaAddrSpace() const

unsigned getNumParams() const

Return the number of fixed parameters this function type requires.

MDNode * getMetadata(unsigned KindID) const

Get the metadata of given kind attached to this Instruction.

constexpr unsigned getScalarSizeInBits() const

constexpr bool isScalar() const

constexpr LLT changeElementType(LLT NewEltTy) const

If this type is a vector, return a vector with the same number of elements but the new element type.

static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)

Get a low-level vector of some number of elements and element width.

static constexpr LLT scalar(unsigned SizeInBits)

Get a low-level scalar or aggregate "bag of bits".

constexpr uint16_t getNumElements() const

Returns the number of elements in a vector LLT.

constexpr bool isVector() const

static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)

Get a low-level pointer in the given address space.

constexpr TypeSize getSizeInBits() const

Returns the total size of the type. Must only be called on sized types.

constexpr bool isPointer() const

constexpr LLT getElementType() const

Returns the vector's element type. Only valid for vector types.

constexpr ElementCount getElementCount() const

constexpr unsigned getAddressSpace() const

static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)

Get a low-level fixed-width vector of some number of elements and element width.

constexpr LLT changeElementCount(ElementCount EC) const

Return a vector or scalar with the same element type and the new element count.

constexpr LLT getScalarType() const

constexpr TypeSize getSizeInBytes() const

Returns the total size of the type in bytes, i.e.

This is an important class for using LLVM in a threaded context.

Wrapper class representing physical registers. Should be passed by value.

unsigned getVectorNumElements() const

bool isVector() const

Return true if this is a vector value type.

static LLVM_ABI MVT getVT(Type *Ty, bool HandleUnknown=false)

Return the value type corresponding to the specified type.

The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.

LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)

Create a new statically sized stack object, returning a nonnegative identifier to represent it.

MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)

getMachineMemOperand - Allocate a new MachineMemOperand.

MachineFrameInfo & getFrameInfo()

getFrameInfo - Return the frame info object for the current function.

MachineRegisterInfo & getRegInfo()

getRegInfo - Return information about the registers currently in use.

const DataLayout & getDataLayout() const

Return the DataLayout attached to the Module associated to this MF.

Function & getFunction()

Return the LLVM function that this machine code represents.

const TargetMachine & getTarget() const

getTarget - Return the target machine this machine code is compiled with

Helper class to build MachineInstr.

MachineInstrBuilder buildGlobalValue(const DstOp &Res, const GlobalValue *GV)

Build and insert Res = G_GLOBAL_VALUE GV.

std::optional< MachineInstrBuilder > materializeObjectPtrOffset(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)

Materialize and insert an instruction with appropriate flags for addressing some offset of an object,...

MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)

Build and insert Res = G_LOAD Addr, MMO.

MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)

Build and insert G_STORE Val, Addr, MMO.

MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)

Build and insert Res = G_FRAME_INDEX Idx.

MachineFunction & getMF()

Getter for the function we currently build.

MachineInstrBuilder buildAssertAlign(const DstOp &Res, const SrcOp &Op, Align AlignVal)

Build and insert Res = G_ASSERT_ALIGN Op, AlignVal.

MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)

Build and insert Res = COPY Op.

const DataLayout & getDataLayout() const

Register getReg(unsigned Idx) const

Get the register for the operand index.

Representation of each machine instruction.

unsigned getOpcode() const

Returns the opcode of this MachineInstr.

const MachineOperand & getOperand(unsigned i) const

A description of a memory reference used in the backend.

@ MODereferenceable

The memory access is dereferenceable (i.e., doesn't trap).

@ MOLoad

The memory access reads data.

@ MOStore

The memory access writes data.

Register getReg() const

getReg - Returns the register number.

static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset, unsigned TargetFlags=0)

static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)

clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.

static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)

MachineRegisterInfo - Keep track of information for virtual and physical registers,...

Class to represent pointers.

static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)

This constructs a pointer to an object of the specified type in a numbered address space.

Wrapper class representing virtual and physical registers.

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

reference emplace_back(ArgTypes &&... Args)

void reserve(size_type N)

iterator insert(iterator I, T &&Elt)

void truncate(size_type N)

Like resize, but requires that N is less than size().

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

virtual Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const

Returns the desired alignment for ByVal or InAlloca aggregate function arguments in the caller parame...

The instances of the Type class are immutable: once they are created, they are never changed.

LLVM_ABI unsigned getPointerAddressSpace() const

Get the address space of this pointer or pointer vector type.

Type * getScalarType() const

If this is a vector type, return the element type, otherwise return 'this'.

LLVMContext & getContext() const

Return the LLVMContext in which this type was uniqued.

LLVM Value Representation.

Type * getType() const

All values are typed, get the type of this value.

LLVM_ABI const Value * stripPointerCasts() const

Strip off pointer casts, all-zero GEPs and address space casts.

constexpr ScalarTy getFixedValue() const

static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)

static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

unsigned ID

LLVM IR allows the use of arbitrary numbers as calling convention identifiers.

This is an optimization pass for GlobalISel generic memory operations.

void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs=nullptr, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())

ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...

decltype(auto) dyn_cast(const From &Val)

dyn_cast - Return the argument parameter cast to the specified type.

bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)

CCAssignFn - This function assigns a location for Val, updating State to reflect the change.

LLVM_ABI MachineInstr * getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)

Find the def instruction for Reg, folding away any trivial copies.

LLVM_ABI raw_ostream & dbgs()

dbgs() - This returns a reference to a raw_ostream for debugging messages.

bool isa(const From &Val)

isa - Return true if the parameter to the template is an instance of one of the template type argu...

constexpr T divideCeil(U Numerator, V Denominator)

Returns the integer ceil(Numerator / Denominator).

LLVM_ABI LLVM_READNONE LLT getCoverTy(LLT OrigTy, LLT TargetTy)

Return smallest type that covers both OrigTy and TargetTy and is multiple of TargetTy.

uint64_t alignTo(uint64_t Size, Align A)

Returns a multiple of A needed to store Size bytes.
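Worked example: storing 13 bytes at 8-byte alignment occupies the next multiple of 8:

uint64_t Padded = alignTo(13, Align(8)); // 16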

bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)

Test if the given instruction is in a position to be optimized with a tail-call.

ArrayRef(const T &OneElt) -> ArrayRef< T >

OutputIt copy(R &&Range, OutputIt Out)

decltype(auto) cast(const From &Val)

cast - Return the argument parameter cast to the specified type.

Align commonAlignment(Align A, uint64_t Offset)

Returns the alignment that satisfies both alignments.

LLVM_ABI LLVM_READNONE LLT getGCDType(LLT OrigTy, LLT TargetTy)

Return a type where the total size is the greatest common divisor of OrigTy and TargetTy.
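Illustrative scalar case (hypothetical values): the result size is gcd(64, 48) = 16 bits:

LLT Gcd = getGCDType(LLT::scalar(64), LLT::scalar(48)); // s16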

LLVM_ABI LLT getLLTForType(Type &Ty, const DataLayout &DL)

Construct a low-level type based on an LLVM type.

LLVM_ABI Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO)

This struct is a compact representation of a valid (non-zero power of two) alignment.

const Value * OrigValue

Optionally track the original IR value for the argument.

SmallVector< Register, 4 > Regs

unsigned OrigArgIndex

Index original Function's argument.

static const unsigned NoArgIndex

Sentinel value for implicit machine-level input arguments.

SmallVector< ISD::ArgFlagsTy, 4 > Flags

void assignValueToReg(Register ValVReg, Register PhysReg, const CCValAssign &VA) override

Provides a default implementation for argument handling.

Definition CallLowering.cpp:1391

Register buildExtensionHint(const CCValAssign &VA, Register SrcReg, LLT NarrowTy)

Insert G_ASSERT_ZEXT/G_ASSERT_SEXT or other hint instruction based on VA, returning the new register ...

Definition CallLowering.cpp:1350

Argument handling is mostly uniform between the four places that make these decisions: function forma...

virtual bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, const ArgInfo &Info, ISD::ArgFlagsTy Flags, CCState &State)

Wrap call to (typically tablegenerated CCAssignFn).

MachineIRBuilder & MIRBuilder

void copyArgumentMemory(const ArgInfo &Arg, Register DstPtr, Register SrcPtr, const MachinePointerInfo &DstPtrInfo, Align DstAlign, const MachinePointerInfo &SrcPtrInfo, Align SrcAlign, uint64_t MemSize, CCValAssign &VA) const

Do a memory copy of MemSize bytes from SrcPtr to DstPtr.

Definition CallLowering.cpp:1276

MachineRegisterInfo & MRI

virtual Register getStackAddress(uint64_t MemSize, int64_t Offset, MachinePointerInfo &MPO, ISD::ArgFlagsTy Flags)=0

Materialize a VReg containing the address of the specified stack-based object.

virtual LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const

Return the in-memory size to write for the argument at VA.

Definition CallLowering.cpp:1253

bool isIncomingArgumentHandler() const

Returns true if the handler is dealing with incoming arguments, i.e.

virtual void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy, const MachinePointerInfo &MPO, const CCValAssign &VA)=0

The specified value has been assigned to a stack location.

Register extendRegister(Register ValReg, const CCValAssign &VA, unsigned MaxSizeBits=0)

Extend a register to the location type given in VA, capped at extending to at most MaxSize bits.

Definition CallLowering.cpp:1299
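Inside a ValueHandler subclass, assuming a CCValAssign with ValVT = i8, LocVT = i32 and LocInfo = SExt, this emits a sign extension to the location type:

// Emits: %ext:_(s32) = G_SEXT %val:_(s8)
Register Ext = extendRegister(ValReg, VA);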

virtual unsigned assignCustomValue(ArgInfo &Arg, ArrayRef< CCValAssign > VAs, std::function< void()> *Thunk=nullptr)

Handle custom values, which may be passed into one or more of VAs.

virtual void assignValueToReg(Register ValVReg, Register PhysReg, const CCValAssign &VA)=0

The specified value has been assigned to a physical register, handle the appropriate COPY (either to ...

static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)

Return the value type corresponding to the specified type.

LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const

This method returns an LLVM type corresponding to the specified EVT.

This class contains a discriminated union of information about pointers in memory operands,...

static LLVM_ABI MachinePointerInfo getUnknownStack(MachineFunction &MF)

Stack memory without other information.

static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)

Return a MachinePointerInfo record that refers to the specified FrameIndex.

This struct is a compact representation of a valid (power of two) or undefined (0) alignment.