LLVM: lib/Target/X86/X86ISelLoweringCall.cpp Source File

//===- X86ISelLoweringCall.cpp - X86 DAG Lowering Implementation ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to DAG nodes.
//
//===----------------------------------------------------------------------===//

// ...

#define DEBUG_TYPE "x86-isel"

using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");

/// Call this when the user attempts to do something unsupported, like
/// returning a double without SSE2 enabled on x86_32.
static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl,
                             const char *Msg) {
  // ...
}

/// Returns true if a CC can dynamically exclude a register from the list of
/// callee-saved registers based on the return registers.
static bool shouldDisableRetRegFromCSR(CallingConv::ID CC) {
  switch (CC) {
  default:
    return false;
  // ...
    return true;
  }
}

/// Returns true if a CC can dynamically exclude a register from the list of
/// callee-saved registers based on the arguments.
static bool shouldDisableArgRegFromCSR(CallingConv::ID CC) {
  // ...
}

static std::pair<MVT, unsigned>
handleMaskRegisterForCallingConv(unsigned NumElts, CallingConv::ID CC,
                                 const X86Subtarget &Subtarget) {
  // ...
  if (NumElts == 2)
    return {MVT::v2i64, 1};
  if (NumElts == 4)
    return {MVT::v4i32, 1};
  // ...
    return {MVT::v8i16, 1};
  // ...
    return {MVT::v16i8, 1};

  // ...
    return {MVT::v32i8, 1};

  // ...
    return {MVT::v64i8, 1};
  return {MVT::v32i8, 2};
  // ...

  // Use one i8 register per element.
  if (isPowerOf2_32(NumElts) || (NumElts == 64 && !Subtarget.hasBWI()) ||
      NumElts > 64)
    return {MVT::i8, NumElts};

  // ...
}
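
// --- Illustrative sketch (not part of the LLVM source) ----------------------
// A standalone, hedged model of just the final fallback above: once none of
// the packed-vector cases fires, the guard below decides whether a
// v<NumElts>i1 mask is passed as one i8 register per element. The function
// name and the plain bool flag are stand-ins for X86Subtarget queries.
#include <cassert>

static bool passesMaskAsBytes(unsigned NumElts, bool HasBWI) {
  auto isPowerOf2 = [](unsigned V) { return V && (V & (V - 1)) == 0; };
  // Same boolean condition as the {MVT::i8, NumElts} case above.
  return isPowerOf2(NumElts) || (NumElts == 64 && !HasBWI) || NumElts > 64;
}

int main() {
  assert(passesMaskAsBytes(128, true)); // wider than any mask register
  assert(!passesMaskAsBytes(3, true));  // odd small masks fall through
}
// -----------------------------------------------------------------------------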

MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                     CallingConv::ID CC,
                                                     EVT VT) const {
  // ...
    MVT RegisterVT;
    unsigned NumRegisters;
    std::tie(RegisterVT, NumRegisters) =
        // ...
    return RegisterVT;
  }

  // ...
    return MVT::v8f16;
  }

  // f64 and f80 are lowered via GPRs on 32-bit targets without x87.
  if ((VT == MVT::f64 || VT == MVT::f80) && !Subtarget.is64Bit() &&
      !Subtarget.hasX87())
    return MVT::i32;

  // ...
  if (VT == MVT::bf16)
    return MVT::f16;
  // ...

  // ...
}

unsigned X86TargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
                                                          CallingConv::ID CC,
                                                          EVT VT) const {
  // ...
    MVT RegisterVT;
    unsigned NumRegisters;
    std::tie(RegisterVT, NumRegisters) =
        // ...
    return NumRegisters;
  }

  // ...
    return 1;
  }

  // Without x87 on a 32-bit target, f64 needs two GPRs and f80 needs three.
  if (!Subtarget.is64Bit() && !Subtarget.hasX87()) {
    if (VT == MVT::f64)
      return 2;
    if (VT == MVT::f80)
      return 3;
  }

  // ...
}

unsigned X86TargetLowering::getVectorTypeBreakdownForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
    unsigned &NumIntermediates, MVT &RegisterVT) const {
  // Break wide i1 mask vectors into i8 pieces.
  if (/* ... */ Subtarget.hasAVX512() &&
      /* ... */) {
    RegisterVT = MVT::i8;
    IntermediateVT = MVT::i1;
    // ...
    return NumIntermediates;
  }

  // Split v64i1 into two v32i1 pieces when AVX-512 BW is available but
  // 512-bit registers are not used.
  if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
      /* ... */) {
    RegisterVT = MVT::v32i8;
    IntermediateVT = MVT::v32i1;
    NumIntermediates = 2;
    return 2;
  }

  // ...
  return TargetLowering::getVectorTypeBreakdownForCallingConv(
      Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
}

EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
                                          LLVMContext &Context,
                                          EVT VT) const {
  if (!VT.isVector())
    return MVT::i8;

  if (Subtarget.hasAVX512()) {
    // ...
    EVT LegalVT = VT;
    // ...

    if (Subtarget.hasBWI() || EltVT.getSizeInBits() >= 32)
      // ...
  }

  // ...
}

// ...
  if (Ty->isIntegerTy(128))
    return true;

  // ...
  if (Subtarget.is32Bit() && Ty->isFP128Ty())
    return true;

  return false;
}

/// Helper for getByValTypeAlignment to determine the desired ByVal argument
/// alignment.
static void getMaxByValAlign(Type *Ty, Align &MaxAlign) {
  if (MaxAlign == 16)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getPrimitiveSizeInBits().getFixedValue() == 128)
      MaxAlign = Align(16);
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Align EltAlign;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      Align EltAlign;
      getMaxByValAlign(EltTy, EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == 16)
        break;
    }
  }
}

/// Return the desired alignment for ByVal aggregate function arguments in the
/// caller parameter area.
Align X86TargetLowering::getByValTypeAlignment(Type *Ty,
                                               const DataLayout &DL) const {
  if (Subtarget.is64Bit())
    // ...

  Align Alignment(4);
  if (Subtarget.hasSSE1())
    getMaxByValAlign(Ty, Alignment);
  return Alignment;
}
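
// --- Illustrative sketch (not part of the LLVM source) ----------------------
// A hedged, standalone model of the 32-bit rule above: aggregates containing a
// 128-bit SIMD member get 16-byte byval alignment when SSE1 is available,
// everything else stays at 4 bytes. The 64-bit minimum of 8 bytes is an
// assumption, since that branch is elided in the listing.
#include <cstddef>

struct Plain { int a, b; };                 // no vector member
struct WithVec { alignas(16) float v[4]; }; // carries a 16-byte SIMD member

static size_t byValAlign(bool Is64Bit, bool HasSSE1, size_t MaxFieldAlign) {
  if (Is64Bit)
    return MaxFieldAlign < 8 ? 8 : MaxFieldAlign; // assumed 64-bit minimum
  size_t Alignment = 4;
  if (HasSSE1 && MaxFieldAlign >= 16)
    Alignment = 16; // what getMaxByValAlign would discover
  return Alignment;
}

int main() {
  return !(byValAlign(false, true, alignof(WithVec)) == 16 &&
           byValAlign(false, true, alignof(Plain)) == 4);
}
// -----------------------------------------------------------------------------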

/// Returns the target-specific optimal type for load and store operations as a
/// result of memset, memcpy, and memmove lowering.
EVT X86TargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes) const {
  if (!FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat)) {
    if (Op.size() >= 16 &&
        (!Subtarget.isUnalignedMem16Slow() || Op.isAligned(Align(16)))) {
      // Prefer 512-bit ops when they are cheap and wide vectors are allowed.
      if (Op.size() >= 64 && Subtarget.hasAVX512() &&
          (Subtarget.getPreferVectorWidth() >= 512)) {
        return Subtarget.hasBWI() ? MVT::v64i8 : MVT::v16i32;
      }
      // Likewise for 256-bit ops.
      if (Op.size() >= 32 && Subtarget.hasAVX() &&
          Subtarget.useLight256BitInstructions()) {
        // ...
        return MVT::v32i8;
      }
      if (Subtarget.hasSSE2() && (Subtarget.getPreferVectorWidth() >= 128))
        return MVT::v16i8;
      // SSE1-only: only use v4f32 if f64 stores would otherwise need x87.
      if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()) &&
          (Subtarget.getPreferVectorWidth() >= 128))
        return MVT::v4f32;
    } else if (((Op.isMemcpy() && Op.isMemcpyStrSrc()) || Op.isZeroMemset()) &&
               Op.size() >= 8 && !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
      // Do not use f64 to lower memcpy if source is string constant. It's
      // better to use i32 to avoid the loads.
      // Also, do not use f64 to lower memset unless this is a memset of
      // zeros. The lowering will be done by a wider type later.
      return MVT::f64;
    }
  }

  // ...
  if (Subtarget.is64Bit() && Op.size() >= 8)
    return MVT::i64;
  return MVT::i32;
}
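
// --- Illustrative sketch (not part of the LLVM source) ----------------------
// The decision above is a widest-first cascade. This standalone sketch models
// it with plain bools in place of the X86Subtarget queries; the alignment and
// NoImplicitFloat checks are deliberately omitted, so treat it only as a
// summary of the preference order, not as the actual policy.
#include <cstdint>
#include <string>

static std::string memopType(uint64_t Size, bool AVX512, bool BWI, bool AVX,
                             bool SSE2, bool Is64Bit) {
  if (Size >= 64 && AVX512)
    return BWI ? "v64i8" : "v16i32";
  if (Size >= 32 && AVX)
    return "v32i8";
  if (Size >= 16 && SSE2)
    return "v16i8";
  if (Is64Bit && Size >= 8)
    return "i64";
  return "i32";
}

int main() {
  // A 64-byte memcpy on an AVX-512BW machine gets one 512-bit type.
  return memopType(64, true, true, true, true, true) == "v64i8" ? 0 : 1;
}
// -----------------------------------------------------------------------------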

bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
  if (VT == MVT::f32)
    return Subtarget.hasSSE1();
  if (VT == MVT::f64)
    return Subtarget.hasSSE2();
  return true;
}

static bool isBitAligned(Align Alignment, uint64_t SizeInBits) {
  return (8 * Alignment.value()) % SizeInBits == 0;
}
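
// --- Illustrative sketch (not part of the LLVM source) ----------------------
// isBitAligned converts a byte alignment to bits and asks whether an access of
// SizeInBits is necessarily aligned. The same arithmetic as a runnable demo:
#include <cassert>
#include <cstdint>

static bool isBitAlignedModel(uint64_t AlignBytes, uint64_t SizeInBits) {
  return (8 * AlignBytes) % SizeInBits == 0;
}

int main() {
  assert(isBitAlignedModel(16, 128)); // 16-byte alignment covers 128-bit ops
  assert(!isBitAlignedModel(8, 128)); // 8-byte alignment does not
}
// -----------------------------------------------------------------------------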

bool X86TargetLowering::isMemoryAccessFast(EVT VT, Align Alignment) const {
  if (isBitAligned(Alignment, VT.getSizeInBits()))
    return true;
  switch (VT.getSizeInBits()) {
  default:
    // 8-byte and under are always assumed to be fast.
    return true;
  case 128:
    return !Subtarget.isUnalignedMem16Slow();
  case 256:
    return !Subtarget.isUnalignedMem32Slow();
  // ...
  }
}

bool X86TargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
    unsigned *Fast) const {
  if (Fast)
    *Fast = isMemoryAccessFast(VT, Alignment);
  // Non-temporal vector memory ops must be aligned.
  if (!!(Flags & MachineMemOperand::MONonTemporal) && VT.isVector()) {
    // ...
    if (!!(Flags & MachineMemOperand::MOLoad))
      return (Alignment < 16 || !Subtarget.hasSSE41());
    return false;
  }
  // Misaligned accesses of any size are always allowed.
  return true;
}

bool X86TargetLowering::allowsMemoryAccess(LLVMContext &Context,
                                           const DataLayout &DL, EVT VT,
                                           unsigned AddrSpace, Align Alignment,
                                           MachineMemOperand::Flags Flags,
                                           unsigned *Fast) const {
  if (allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags,
                                     nullptr))
    return true;

  // ...
    return false;
  // ...
  case 128:
    // ...
      return true;
    // ...
      return true;
    return false;
  case 256:
    // ...
      return true;
    // ...
      return true;
    return false;
  case 512:
    if (Subtarget.hasAVX512())
      return true;
    return false;
  default:
    return false;
  }
  }
  return true;
}

// ...
      !Subtarget.isTargetCOFF())
    // ...
}

bool X86TargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

void X86TargetLowering::markLibCallAttributes(MachineFunction *MF, unsigned CC,
                                              ArgListTy &Args) const {
  // ...
  if (Subtarget.is64Bit())
    return;
  // ...
    return;
  unsigned ParamRegs = 0;
  if (auto *M = MF->getFunction().getParent())
    ParamRegs = M->getNumberRegisterParameters();

  // Mark the first N int arguments as having reg.
  for (auto &Arg : Args) {
    Type *T = Arg.Ty;
    if (T->isIntOrPtrTy())
      // ...
        unsigned numRegs = 1;
        // ...
          numRegs = 2;
        if (ParamRegs < numRegs)
          return;
        ParamRegs -= numRegs;
        Arg.IsInReg = true;
      }
  }
}

const MCExpr *
X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                                             const MachineBasicBlock *MBB,
                                             unsigned uid, MCContext &Ctx) const {
  // ...
  return MCSymbolRefExpr::create(MBB->getSymbol(), /* ... */ Ctx);
}

/// Returns relocation base for the given PIC jumptable.
SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                    SelectionDAG &DAG) const {
  if (!Subtarget.is64Bit())
    // ...
    return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
                       getPointerTy(DAG.getDataLayout()));
  return Table;
}

const MCExpr *X86TargetLowering::getPICJumpTableRelocBaseExpr(
    const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const {
  // X86-64 uses RIP relative addressing based on the jump table label.
  if (Subtarget.isPICStyleRIPRel() ||
      (Subtarget.is64Bit() &&
       /* ... */))
    return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);

  // Otherwise, the reference is relative to the PIC base.
  return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
}

std::pair<const TargetRegisterClass *, uint8_t>
X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                           MVT VT) const {
  const TargetRegisterClass *RRC = nullptr;
  uint8_t Cost = 1;
  switch (VT.SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(TRI, VT);
  case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
    RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
    break;
  case MVT::x86mmx:
    RRC = &X86::VR64RegClass;
    break;
  case MVT::f32: case MVT::f64:
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
  case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
  case MVT::v8f32: case MVT::v4f64:
  case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64:
  case MVT::v16f32: case MVT::v8f64:
    RRC = &X86::VR128XRegClass;
    break;
  }
  return std::make_pair(RRC, Cost);
}

unsigned X86TargetLowering::getAddressSpace() const {
  if (Subtarget.is64Bit())
    // ...
  // ...
}

static bool hasStackGuardSlotTLS(const Triple &TargetTriple) {
  // ...
}

static Constant *SegmentOffset(IRBuilderBase &IRB, int Offset,
                               unsigned AddressSpace) {
  return ConstantExpr::getIntToPtr(
      ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
      IRB.getPtrTy(AddressSpace));
}

Value *X86TargetLowering::getIRStackGuard(IRBuilderBase &IRB) const {
  // glibc, bionic, and Fuchsia have a special slot for the stack guard.
  if (hasStackGuardSlotTLS(Subtarget.getTargetTriple())) {
    unsigned AddressSpace = getAddressSpace();

    // ...
    if (Subtarget.isTargetFuchsia())
      return SegmentOffset(IRB, 0x10, AddressSpace);

    Module *M = IRB.GetInsertBlock()->getParent()->getParent();
    int Offset = M->getStackProtectorGuardOffset();

    // %fs:0x28, unless we're using a Kernel code model, in which case
    // it's %gs:0x28. %gs:0x14 on i386.
    if (Offset == INT_MAX)
      Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;

    StringRef GuardReg = M->getStackProtectorGuardReg();
    if (GuardReg == "fs")
      // ...
    else if (GuardReg == "gs")
      // ...

    // Use a symbol guard if the user specified one.
    StringRef GuardSymb = M->getStackProtectorGuardSymbol();
    if (!GuardSymb.empty()) {
      GlobalVariable *GV = M->getGlobalVariable(GuardSymb);
      if (!GV) {
        // ...
        GV = new GlobalVariable(*M, /* ... */ GlobalValue::ExternalLinkage,
                                nullptr, GuardSymb, nullptr,
                                /* ... */);
        if (!Subtarget.isTargetDarwin())
          GV->setDSOLocal(M->getDirectAccessExternalData());
      }
      return GV;
    }

    return SegmentOffset(IRB, Offset, AddressSpace);
  }
  // ...
}

void X86TargetLowering::insertSSPDeclarations(Module &M) const {
  RTLIB::LibcallImpl SecurityCheckCookieLibcall = /* ... */;
  RTLIB::LibcallImpl SecurityCookieVar = /* ... */;
  if (SecurityCheckCookieLibcall != RTLIB::Unsupported &&
      SecurityCookieVar != RTLIB::Unsupported) {
    // The MSVC CRT provides the security cookie variable and a function to
    // validate it.
    // ...

    FunctionCallee SecurityCheckCookie =
        M.getOrInsertFunction(getLibcallImplName(SecurityCheckCookieLibcall),
                              /* ... */);
    if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) {
      // ...
      F->addParamAttr(0, Attribute::AttrKind::InReg);
    }
    return;
  }

  StringRef GuardMode = M.getStackProtectorGuard();

  // glibc, bionic, and Fuchsia have a special slot for the stack guard.
  if ((GuardMode == "tls" || GuardMode.empty()) &&
      hasStackGuardSlotTLS(Subtarget.getTargetTriple()))
    return;
  // ...
}

Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
  // ...
  if (Subtarget.isTargetAndroid()) {
    // %fs:0x48 on x86-64, %gs:0x24 on i386.
    int Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
    return SegmentOffset(IRB, Offset, getAddressSpace());
  }

  // Fuchsia is similar.
  if (Subtarget.isTargetFuchsia()) {
    // <zircon/tls.h> defines the unsafe stack pointer slot at this offset.
    return SegmentOffset(IRB, 0x18, getAddressSpace());
  }

  // ...
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool X86TargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context,
    const Type *RetTy) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_X86);
}

const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
  static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
  return ScratchRegs;
}

ArrayRef<MCPhysReg> X86TargetLowering::getRoundingControlRegisters() const {
  static const MCPhysReg RCRegs[] = {X86::FPCW, X86::MXCSR};
  return RCRegs;
}

/// Lowers masks values (v*i1) to the local register values.
/// \returns The value that is the result of the lowering.
static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc,
                               const SDLoc &DL, SelectionDAG &DAG) {
  EVT ValVT = ValArg.getValueType();

  if (ValVT == MVT::v1i1)
    // ...

  if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) ||
      (ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) {
    // Two stage lowering might be required
    // bitcast:   v8i1 -> i8 / v16i1 -> i16
    // anyextend: i8   -> i32 / i16   -> i32
    EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16;
    SDValue ValToCopy = DAG.getBitcast(TempValLoc, ValArg);
    if (ValLoc == MVT::i32)
      ValToCopy = DAG.getNode(ISD::ANY_EXTEND, DL, ValLoc, ValToCopy);
    return ValToCopy;
  }

  if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) ||
      (ValVT == MVT::v64i1 && ValLoc == MVT::i64)) {
    // One stage lowering is required
    // bitcast:   v32i1 -> i32 / v64i1 -> i64
    return DAG.getBitcast(ValLoc, ValArg);
  }

  // ...
}

/// Breaks v64i1 value into two registers and adds the new node to the DAG.
static void Passv64i1ArgInRegs(
    const SDLoc &DL, SelectionDAG &DAG, SDValue &Arg,
    SmallVectorImpl<std::pair<Register, SDValue>> &RegsToPass, CCValAssign &VA,
    CCValAssign &NextVA, const X86Subtarget &Subtarget) {
  assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
  assert(Subtarget.is32Bit() && "Expecting 32 bit target");
  assert(VA.isRegLoc() && NextVA.isRegLoc() &&
         "The value should reside in two registers");

  // Before splitting the value we cast it to i64.
  // ...

  // Split the i64 value into two i32 halves.
  SDValue Lo, Hi;
  // ...

  // Attach the two i32 halves to the corresponding registers.
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
  RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi));
}
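
// --- Illustrative sketch (not part of the LLVM source) ----------------------
// The split above is, at the bit level, just cutting a 64-bit mask into two
// 32-bit halves that travel in two GPRs on a 32-bit target. A standalone
// model of that bit manipulation:
#include <cassert>
#include <cstdint>

static void splitMask64(uint64_t Mask, uint32_t &Lo, uint32_t &Hi) {
  Lo = static_cast<uint32_t>(Mask);       // low 32 mask bits
  Hi = static_cast<uint32_t>(Mask >> 32); // high 32 mask bits
}

int main() {
  uint32_t Lo, Hi;
  splitMask64(0xDEADBEEF00C0FFEEull, Lo, Hi);
  assert(Lo == 0x00C0FFEEu && Hi == 0xDEADBEEFu);
}
// -----------------------------------------------------------------------------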

SDValue
X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  // ...
  bool ShouldDisableCalleeSavedRegister =
      // ...

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC_X86);

  SmallVector<std::pair<Register, SDValue>, 4> RetVals;
  for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E;
       ++I, ++OutsIndex) {
    CCValAssign &VA = RVLocs[I];
    assert(VA.isRegLoc() && "Can only return in registers!");

    // Add the register to the CalleeSaveDisableRegs list.
    if (ShouldDisableCalleeSavedRegister)
      MF.getRegInfo().disableCalleeSavedRegister(VA.getLocReg());

    SDValue ValToCopy = OutVals[OutsIndex];
    EVT ValVT = ValToCopy.getValueType();

    // ...
    else
      // ...

    assert(VA.getLocInfo() != CCValAssign::FPExt &&
           "Unexpected FP-extend for return value.");

    // Report an error if we have attempted to return a value via an XMM
    // register and SSE was disabled.
    if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
      errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
      VA.convertToReg(X86::FP0); // Set reg to FP0; avoid hitting asserts.
    } else if (!Subtarget.hasSSE2() &&
               X86::FR64XRegClass.contains(VA.getLocReg()) &&
               ValVT == MVT::f64) {
      // Likewise for f64 returns when SSE2 is not available.
      errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
      VA.convertToReg(X86::FP0);
    }

    // Returns in ST0/ST1 are handled specially: they are pushed as operands on
    // the return instruction.
    if (VA.getLocReg() == X86::FP0 ||
        VA.getLocReg() == X86::FP1) {
      // If this is a copy from an XMM register to ST(0), use an FPExtend to
      // reach the FP stack register class first.
      if (/* ... */)
        ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
      // ...
      continue;
    }

    // 64-bit vector (MMX) values are returned in XMM0 / XMM1 on x86-64.
    if (Subtarget.is64Bit()) {
      if (ValVT == MVT::x86mmx) {
        // ...
        ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
        // ...
        // If we don't have SSE2 available, keep the copy legal via v4f32.
        if (!Subtarget.hasSSE2())
          ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
      }
    }

    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::v64i1 &&
             "Currently the only custom case is when we split v64i1 to 2 regs");

      Passv64i1ArgInRegs(dl, DAG, ValToCopy, RetVals, VA, RVLocs[++I],
                         Subtarget);

      // Add the second register to the CalleeSaveDisableRegs list.
      if (ShouldDisableCalleeSavedRegister)
        MF.getRegInfo().disableCalleeSavedRegister(RVLocs[I].getLocReg());
    } else {
      RetVals.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
    }
  }

  SDValue Glue;
  SmallVector<SDValue, 6> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below).
  // Operand #1 = Bytes To Pop.
  RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
                                         MVT::i32));

  // Copy the result values into the output registers.
  for (auto &RetVal : RetVals) {
    if (RetVal.first == X86::FP0 || RetVal.first == X86::FP1) {
      // ...
      continue;
    }

    Chain = DAG.getCopyToReg(Chain, dl, RetVal.first, RetVal.second, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(
        DAG.getRegister(RetVal.first, RetVal.second.getValueType()));
  }

  // ... (a long comment about copying the incoming sret pointer into
  // RAX/EAX for the return is elided)

  // ...
    Register RetValReg
        = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
          X86::RAX : X86::EAX;
    Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Glue);
    // ...

    if (ShouldDisableCalleeSavedRegister &&
        /* ... */)
      // ...
  }

  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
  // ...
  if (I) {
    for (; *I; ++I) {
      if (X86::GR64RegClass.contains(*I))
        // ...
      else
        llvm_unreachable("Unexpected register class in CSRsViaCopy!");
    }
  }

  RetOps[0] = Chain; // Update chain.

  // ...
  return DAG.getNode(opcode, dl, MVT::Other, RetOps);
}

bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
  if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
    return false;

  SDValue TCChain = Chain;
  SDNode *Copy = *N->user_begin();
  if (Copy->getOpcode() == ISD::CopyToReg) {
    // If the copy has a glue operand, we conservatively assume it isn't safe
    // to perform a tail call.
    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
      return false;
    TCChain = Copy->getOperand(0);
  } else if (Copy->getOpcode() != ISD::FP_EXTEND)
    return false;

  bool HasRet = false;
  for (const SDNode *U : Copy->users()) {
    if (/* ... */)
      return false;
    // If we are returning more than one value, we can definitely
    // not make a tail call, see PR19530.
    if (U->getNumOperands() > 4)
      return false;
    if (U->getNumOperands() == 4 &&
        U->getOperand(U->getNumOperands() - 1).getValueType() != MVT::Glue)
      return false;
    HasRet = true;
  }

  if (!HasRet)
    return false;

  Chain = TCChain;
  return true;
}

EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
                                           ISD::NodeType ExtendKind) const {
  MVT ReturnMVT = MVT::i32;

  bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
  if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
    // The ABI does not require i1, i8 or i16 to be extended.
    //
    // On Darwin, there is code in the wild relying on Clang's old behaviour of
    // always extending i8/i16 return values, so we keep doing that for now.
    // (PR26665).
    ReturnMVT = MVT::i8;
  }

  EVT MinVT = getRegisterType(Context, ReturnMVT);
  return VT.bitsLT(MinVT) ? MinVT : VT;
}
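
// --- Illustrative sketch (not part of the LLVM source) ----------------------
// A standalone model of the promotion rule above, using a small scoped enum
// whose declaration order stands in for bitsLT: i1 widens to i8 everywhere,
// while i8/i16 widen to i32 only on Darwin.
#include <cassert>

enum class RetVT { i1, i8, i16, i32 };

static RetVT typeForExtReturn(RetVT VT, bool Darwin) {
  RetVT Min = RetVT::i32;
  if (VT == RetVT::i1 || (!Darwin && (VT == RetVT::i8 || VT == RetVT::i16)))
    Min = RetVT::i8;
  return VT < Min ? Min : VT; // bitsLT analogue via enum ordering
}

int main() {
  assert(typeForExtReturn(RetVT::i1, true) == RetVT::i8);
  assert(typeForExtReturn(RetVT::i16, true) == RetVT::i32);  // Darwin keeps i32
  assert(typeForExtReturn(RetVT::i16, false) == RetVT::i16); // elsewhere: as-is
}
// -----------------------------------------------------------------------------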

/// Reads two 32 bit registers and creates a 64 bit mask value.
static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA,
                                SDValue &Root, SelectionDAG &DAG,
                                const SDLoc &DL, const X86Subtarget &Subtarget,
                                SDValue *InGlue = nullptr) {
  assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
  assert(Subtarget.is32Bit() && "Expecting 32 bit target");
  assert(VA.getValVT() == MVT::v64i1 &&
         "Expecting first location of 64 bit width type");
  assert(NextVA.getValVT() == VA.getValVT() &&
         "The locations should have the same type");
  assert(VA.isRegLoc() && NextVA.isRegLoc() &&
         "The values should reside in two registers");

  SDValue Lo, Hi;
  SDValue ArgValueLo, ArgValueHi;

  // ...

  // Read a 32 bit value from the registers.
  if (nullptr == InGlue) {
    // When no physical register is present,
    // create an intermediate virtual register.
    // ...
  } else {
    // When a physical register is available, read the value from it and glue
    // the reads together.
    ArgValueLo =
        DAG.getCopyFromReg(Root, DL, VA.getLocReg(), MVT::i32, *InGlue);
    *InGlue = ArgValueLo.getValue(2);
    ArgValueHi =
        DAG.getCopyFromReg(Root, DL, NextVA.getLocReg(), MVT::i32, *InGlue);
    *InGlue = ArgValueHi.getValue(2);
  }

  // Convert the i32 type into v32i1 type.
  Lo = DAG.getBitcast(MVT::v32i1, ArgValueLo);

  // Convert the i32 type into v32i1 type.
  Hi = DAG.getBitcast(MVT::v32i1, ArgValueHi);

  // Concatenate the two halves into one v64i1 value.
  // ...
}

/// The function will lower a register of various sizes (8/16/32/64)
/// to a mask value of the expected size.
static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT,
                               const EVT &ValLoc, const SDLoc &DL,
                               SelectionDAG &DAG) {
  SDValue ValReturned = ValArg;

  if (ValVT == MVT::v1i1)
    // ...

  if (ValVT == MVT::v64i1) {
    // In 32 bit machine, this case is handled by getv64i1Argument.
    assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
    // ...
  } else {
    MVT MaskLenVT;
    switch (ValVT.getSimpleVT().SimpleTy) {
    case MVT::v8i1:
      MaskLenVT = MVT::i8;
      break;
    case MVT::v16i1:
      MaskLenVT = MVT::i16;
      break;
    case MVT::v32i1:
      MaskLenVT = MVT::i32;
      break;
    default:
      llvm_unreachable("Expecting a vector of i1 types");
    }

    ValReturned = DAG.getNode(ISD::TRUNCATE, DL, MaskLenVT, ValReturned);
  }
  return DAG.getBitcast(ValVT, ValReturned);
}

static SDValue getPopFromX87Reg(SelectionDAG &DAG, SDValue Chain,
                                const SDLoc &dl, Register Reg, EVT VT,
                                SDValue Glue) {
  // ...
}

/// Lower the result values of a call into the appropriate copies out of
/// physical registers / memory locations.
SDValue X86TargetLowering::LowerCallResult(
    SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    uint32_t *RegMask) const {
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins, RetCC_X86);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E;
       ++I, ++InsIndex) {
    CCValAssign &VA = RVLocs[I];
    EVT CopyVT = VA.getLocVT();

    // In some calling conventions we need to remove the used registers
    // from the register mask.
    if (RegMask) {
      for (MCPhysReg SubReg : TRI->subregs_inclusive(VA.getLocReg()))
        RegMask[SubReg / 32] &= ~(1u << (SubReg % 32));
    }

    // Report an error if there was an attempt to return FP values via XMM
    // registers with SSE disabled.
    if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
      errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
      if (VA.getLocReg() == X86::XMM1)
        VA.convertToReg(X86::FP1); // Set reg to FP1; avoid hitting asserts.
      else
        VA.convertToReg(X86::FP0);
    } else if (!Subtarget.hasSSE2() &&
               X86::FR64XRegClass.contains(VA.getLocReg()) &&
               CopyVT == MVT::f64) {
      errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
      if (VA.getLocReg() == X86::XMM1)
        VA.convertToReg(X86::FP1);
      else
        VA.convertToReg(X86::FP0);
    }

    // If we prefer to use the value in XMM registers, copy it out as f80 and
    // use a truncate to move it from the fp stack reg to the XMM reg.
    bool RoundAfterCopy = false;
    bool X87Result = VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1;
    if (X87Result /* ... */) {
      if (!Subtarget.hasX87())
        // ...
      CopyVT = MVT::f80;
      RoundAfterCopy = (CopyVT != VA.getLocVT());
    }

    SDValue Val;
    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::v64i1 &&
             "Currently the only custom case is when we split v64i1 to 2 regs");
      Val =
          getv64i1Argument(VA, RVLocs[++I], Chain, DAG, dl, Subtarget, &InGlue);
    } else {
      Chain =
          X87Result
              // ...
    }

    if (RoundAfterCopy)
      // ...

    // ...
    if (/* ... */
        ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
         (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
      // ...
    } else
      // ...

    // ...
    InVals.push_back(Val);
  }

  return Chain;
}

/// Determines whether Args, either a set of outgoing arguments to a call, or a
/// set of incoming args of a call, contains a structure-return pointer that
/// the callee pops.
template <typename T>
static bool hasCalleePopSRet(const SmallVectorImpl<T> &Args,
                             const X86Subtarget &Subtarget) {
  static_assert(std::is_same_v<T, ISD::OutputArg> ||
                    std::is_same_v<T, ISD::InputArg>,
                "requires ISD::OutputArg or ISD::InputArg");

  // Only 32-bit targets pop the sret.
  if (!Subtarget.is32Bit())
    return false;

  if (Args.empty())
    return false;

  // Most calls do not have an sret argument; check the first arg next.
  const ISD::ArgFlagsTy &Flags = Args[0].Flags;
  if (!Flags.isSRet() || Flags.isInReg())
    return false;

  // ...
  if (/* ... */)
    return false;

  // ...
  if (/* ... */)
    return false;

  // The callee pops the sret pointer.
  return true;
}
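
// --- Illustrative sketch (not part of the LLVM source) ----------------------
// A hedged, standalone model of the shape of the predicate above: on i386, a
// hidden struct-return pointer is popped by the callee (historically `ret $4`)
// unless it is passed inreg. The two elided checks in the real function
// (target/CC special cases) are deliberately omitted here.
#include <cassert>

struct ArgFlags { bool SRet, InReg; };

static bool calleePopsSRet(bool Is32Bit, const ArgFlags &First) {
  if (!Is32Bit)
    return false;
  return First.SRet && !First.InReg;
}

int main() {
  assert(calleePopsSRet(true, {true, false}));
  assert(!calleePopsSRet(false, {true, false})); // 64-bit: never
  assert(!calleePopsSRet(true, {true, true}));   // inreg sret: caller's job
}
// -----------------------------------------------------------------------------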

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address
/// specified by "Src" to address "Dst" with size and alignment information
/// specified by the specific parameter attribute.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &dl) {
  SDValue SizeNode = DAG.getIntPtrConstant(Flags.getByValSize(), dl);

  return DAG.getMemcpy(
      Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
      /*isVolatile=*/false, /*AlwaysInline=*/true,
      /* ... */);
}

/// Return true if the calling convention is one that we can guarantee TCO for.
static bool canGuaranteeTCO(CallingConv::ID CC) {
  // ...
}

/// Return true if we might ever do TCO for calls with this calling convention.
static bool mayTailCallThisCC(CallingConv::ID CC) {
  switch (CC) {
  // C calling conventions:
  // ...
  // Callee pop conventions:
  // ...
  // Swift:
  // ...
    return true;
  default:
    // ...
  }
}

/// Return true if the function is being made into a tailcall target by
/// changing its ABI.
static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
  // ...
}

bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  if (!CI->isTailCall())
    return false;

  CallingConv::ID CalleeCC = CI->getCallingConv();
  if (!mayTailCallThisCC(CalleeCC))
    return false;

  return true;
}

SDValue X86TargetLowering::LowerMemArgument(
    SDValue Chain, CallingConv::ID CallConv,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, const CCValAssign &VA, MachineFrameInfo &MFI,
    unsigned i) const {
  // Create the nodes corresponding to a load from this parameter slot.
  ISD::ArgFlagsTy Flags = Ins[i].Flags;
  // ...
  bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
  EVT ValVT;
  // ...

  // If the value is passed by pointer, we have the address passed instead of
  // the value itself. No need to extend if the mask value and location share
  // the same bit width.
  bool ExtendedInMem =
      VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1 &&
      /* ... */;

  if (/* ... */)
    // ...
  else
    // ...

  // ...

  if (Flags.isByVal()) {
    unsigned Bytes = Flags.getByValSize();
    if (Bytes == 0) Bytes = 1;  // Don't create zero-sized stack objects.

    // ...
    int FI = MFI.CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable,
                                   /*isAliased=*/true);
    return DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
  }

  EVT ArgVT = Ins[i].ArgVT;

  // ...

  // If this is a vector that has been split into multiple parts, don't elide
  // the copy: the layout on the stack may not match the packed in-memory
  // layout.
  // ...

  // This is an argument in memory. We might be able to perform copy elision.
  if (Flags.isCopyElisionCandidate() &&
      /* ... */ &&
      !ScalarizedVector) {
    // ...
    if (Ins[i].PartOffset == 0) {
      // If this is a one-part value or the first part of a multi-part value,
      // create a stack object for the entire argument value type and return a
      // load from our portion of it.
      // ...
      int FI = MFI.CreateFixedObject(ArgVT.getStoreSize(), VA.getLocMemOffset(),
                                     false);
      // ...
      return DAG.getLoad(
          ValVT, dl, Chain, PartAddr,
          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
    }

    // This is not the first piece of an argument in memory. See if there is
    // already a fixed stack object covering this offset and load from it.
    int64_t PartBegin = VA.getLocMemOffset();
    int64_t PartEnd = PartBegin + ValVT.getSizeInBits() / 8;
    // ...
      int64_t ObjEnd = ObjBegin + MFI.getObjectSize(FI);
      if (ObjBegin <= PartBegin && PartEnd <= ObjEnd)
        break;
    }
    // ...
      return DAG.getLoad(ValVT, dl, Chain, Addr,
                         /* ... */);
    }
  }

  // ...

  // ...

  MaybeAlign Alignment;
  if (Subtarget.isTargetWindowsMSVC() && !Subtarget.is64Bit() &&
      ValVT != MVT::f80)
    Alignment = MaybeAlign(4);
  SDValue Val = DAG.getLoad(
      ValVT, dl, Chain, FIN,
      /* ... */
      Alignment);
  return ExtendedInMem
             // ...
             : Val;
}

// FIXME: Get this from tablegen.
static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
                                                const X86Subtarget &Subtarget) {
  assert(Subtarget.is64Bit());

  if (Subtarget.isCallingConvWin64(CallConv)) {
    static const MCPhysReg GPR64ArgRegsWin64[] = {
      X86::RCX, X86::RDX, X86::R8, X86::R9
    };
    return GPR64ArgRegsWin64;
  }

  static const MCPhysReg GPR64ArgRegs64Bit[] = {
    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
  };
  return GPR64ArgRegs64Bit;
}
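
// --- Illustrative sketch (not part of the LLVM source) ----------------------
// The two integer-argument register files quoted above, restated as plain
// data: Win64 passes only four integer arguments in registers, while the SysV
// AMD64 ABI passes six (which is why the helper returns different arrays).
#include <cassert>
#include <cstddef>

static const char *Win64GPRs[] = {"rcx", "rdx", "r8", "r9"};
static const char *SysVGPRs[] = {"rdi", "rsi", "rdx", "rcx", "r8", "r9"};

int main() {
  assert(sizeof(Win64GPRs) / sizeof(*Win64GPRs) == 4);
  assert(sizeof(SysVGPRs) / sizeof(*SysVGPRs) == 6);
}
// -----------------------------------------------------------------------------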

// FIXME: Get this from tablegen.
static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
                                                CallingConv::ID CallConv,
                                                const X86Subtarget &Subtarget) {
  assert(Subtarget.is64Bit());
  if (Subtarget.isCallingConvWin64(CallConv)) {
    // The XMM registers which might contain var arg parameters are shadowed
    // in their paired GPR, so we only need to save the GPRs to their home
    // slots.
    return {};
  }

  bool isSoftFloat = Subtarget.useSoftFloat();
  if (isSoftFloat || !Subtarget.hasSSE1())
    // Kernel mode asks for SSE to be disabled, so there are no XMM argument
    // registers.
    return {};

  static const MCPhysReg XMMArgRegs64Bit[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
  };
  return XMMArgRegs64Bit;
}

#ifndef NDEBUG
static bool isSortedByValueNo(ArrayRef<CCValAssign> ArgLocs) {
  return llvm::is_sorted(
      ArgLocs, [](const CCValAssign &A, const CCValAssign &B) -> bool {
        return A.getValNo() < B.getValNo();
      });
}
#endif

namespace {

/// Helper class that groups the state needed for lowering variadic function
/// parameters.
class VarArgsLoweringHelper {
public:
  VarArgsLoweringHelper(X86MachineFunctionInfo *FuncInfo, const SDLoc &Loc,
                        SelectionDAG &DAG, const X86Subtarget &Subtarget,
                        CallingConv::ID CallConv, CCState &CCInfo)
      : FuncInfo(FuncInfo), DL(Loc), DAG(DAG), Subtarget(Subtarget),
        TheMachineFunction(DAG.getMachineFunction()),
        TheFunction(TheMachineFunction.getFunction()),
        FrameInfo(TheMachineFunction.getFrameInfo()),
        FrameLowering(*Subtarget.getFrameLowering()),
        TargLowering(DAG.getTargetLoweringInfo()), CallConv(CallConv),
        CCInfo(CCInfo) {}

  void lowerVarArgsParameters(SDValue &Chain, unsigned StackSize);

private:
  void createVarArgAreaAndStoreRegisters(SDValue &Chain, unsigned StackSize);

  void forwardMustTailParameters(SDValue &Chain);

  bool is64Bit() const { return Subtarget.is64Bit(); }
  bool isWin64() const { return Subtarget.isCallingConvWin64(CallConv); }

  X86MachineFunctionInfo *FuncInfo;
  const SDLoc &DL;
  SelectionDAG &DAG;
  const X86Subtarget &Subtarget;
  MachineFunction &TheMachineFunction;
  const Function &TheFunction;
  MachineFrameInfo &FrameInfo;
  const TargetFrameLowering &FrameLowering;
  const TargetLowering &TargLowering;
  CallingConv::ID CallConv;
  CCState &CCInfo;
};
} // namespace

void VarArgsLoweringHelper::createVarArgAreaAndStoreRegisters(
    SDValue &Chain, unsigned StackSize) {
  // Make a frame index for the start of the first vararg value for expansion
  // of llvm.va_start.
  // ...
    FuncInfo->setVarArgsFrameIndex(
        FrameInfo.CreateFixedObject(1, StackSize, true));
  }

  // 64-bit calling conventions support varargs and register parameters, so we
  // have to do extra work to spill them in the prologue.
  if (is64Bit()) {
    // Find the first unallocated argument registers.
    // ...
    unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);

    assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
           "SSE register cannot be used when SSE is disabled!");

    if (isWin64()) {
      // Get to the caller-allocated home save location.  Add 8 to account
      // for the return address.
      int HomeOffset = FrameLowering.getOffsetOfLocalArea() + 8;
      FuncInfo->setRegSaveFrameIndex(
          FrameInfo.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));

      if (NumIntRegs < 4)
        FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
    } else {
      // For X86-64, if there are vararg parameters that are passed via
      // registers, then we must store them to their spots on the stack so
      // they may be loaded by dereferencing the result of va_next.
      // ...
      FuncInfo->setRegSaveFrameIndex(FrameInfo.CreateStackObject(
          ArgGPRs.size() * 8 + ArgXMMs.size() * 16, Align(16), false));
    }

    SmallVector<SDValue, 6> LiveGPRs;
    SmallVector<SDValue, 8> LiveXMMRegs;
    SDValue ALVal;

    // Gather all the live-in physical registers.
    for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
      Register GPR = TheMachineFunction.addLiveIn(Reg, &X86::GR64RegClass);
      LiveGPRs.push_back(DAG.getCopyFromReg(Chain, DL, GPR, MVT::i64));
    }
    const auto &AvailableXmms = ArgXMMs.slice(NumXMMRegs);
    if (!AvailableXmms.empty()) {
      Register AL = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
      // ...
      for (MCPhysReg Reg : AvailableXmms) {
        // Pass physical registers to VASTART_SAVE_XMM_REGS to avoid
        // unnecessary spilling by the fast register allocator.
        TheMachineFunction.getRegInfo().addLiveIn(Reg);
        // ...
      }
    }

    // Store the integer parameter registers.
    SmallVector<SDValue, 8> MemOps;
    SDValue RSFIN =
        DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
                          TargLowering.getPointerTy(DAG.getDataLayout()));
    // ...
    for (SDValue Val : LiveGPRs) {
      // ...
    }

    // Store the XMM parameter registers, guarded at runtime by AL.
    if (!LiveXMMRegs.empty()) {
      // ...
      MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, DL,
                                   // ...
                                   SaveXMMOps, MVT::i8, StoreMMO));
    }

    if (!MemOps.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
  }
}

void VarArgsLoweringHelper::forwardMustTailParameters(SDValue &Chain) {
  // Find the largest legal vector type.
  MVT VecVT = MVT::Other;
  // FIXME: Only some x86_32 calling conventions support AVX512.
  if (Subtarget.useAVX512Regs() &&
      /* ... */)
    VecVT = MVT::v16f32;
  else if (Subtarget.hasAVX())
    VecVT = MVT::v8f32;
  else if (Subtarget.hasSSE2())
    VecVT = MVT::v4f32;

  // We forward some GPRs and some vector types.
  SmallVector<MVT, 2> RegParmTypes;
  MVT IntVT = is64Bit() ? MVT::i64 : MVT::i32;
  RegParmTypes.push_back(IntVT);
  if (VecVT != MVT::Other)
    RegParmTypes.push_back(VecVT);

  // Compute the set of forwarded registers. The rest are scratch.
  // ...
  CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);

  // Forward AL for SysV x86_64 targets, since it is used for varargs.
  if (is64Bit() && !isWin64() && !CCInfo.isAllocated(X86::AL)) {
    Register ALVReg = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
    // ...
  }

  // Copy all forwards from physical to virtual registers.
  for (ForwardedRegister &FR : Forwards) {
    // ...
    FR.VReg = TheMachineFunction.getRegInfo().createVirtualRegister(
        TargLowering.getRegClassFor(FR.VT));
    Chain = DAG.getCopyToReg(Chain, DL, FR.VReg, RegVal);
  }
}

void VarArgsLoweringHelper::lowerVarArgsParameters(SDValue &Chain,
                                                   unsigned StackSize) {
  // ...

  if (FrameInfo.hasVAStart())
    createVarArgAreaAndStoreRegisters(Chain, StackSize);

  if (FrameInfo.hasMustTailInVarArgFunc())
    forwardMustTailParameters(Chain);
}
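
// --- Illustrative sketch (not part of the LLVM source) ----------------------
// The register save area allocated above on SysV x86-64 has a fixed size: six
// 8-byte GPR slots plus eight 16-byte XMM slots, mirroring the
// "ArgGPRs.size() * 8 + ArgXMMs.size() * 16" expression. A runnable check:
#include <cassert>

int main() {
  unsigned NumGPRs = 6, NumXMMs = 8;
  unsigned SaveAreaSize = NumGPRs * 8 + NumXMMs * 16;
  assert(SaveAreaSize == 176); // the well-known 176-byte va register area
}
// -----------------------------------------------------------------------------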

SDValue X86TargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  const Function &F = MF.getFunction();
  if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
      F.getName() == "main")
    // ...

  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool Is64Bit = Subtarget.is64Bit();
  bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);

  assert(
      !(IsVarArg && canGuaranteeTCO(CallConv)) &&
      "Var args not supported with calling conv' regcall, fastcc, ghc or hipe");

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  // Allocate shadow area for Win64.
  if (IsWin64)
    CCInfo.AllocateStack(32, Align(8));

  CCInfo.AnalyzeArguments(Ins, CC_X86);

  // In vectorcall calling convention a second pass is required for the HVA
  // types.
  if (CallingConv::X86_VectorCall == CallConv) {
    CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86);
  }

  // The next loop assumes that the locations are in the same order of the
  // input arguments.
  assert(isSortedByValueNo(ArgLocs) &&
         "Argument Location list must be sorted before lowering");

  SDValue ArgValue;
  for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E;
       ++I, ++InsIndex) {
    assert(InsIndex < Ins.size() && "Invalid Ins index");
    CCValAssign &VA = ArgLocs[I];

    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();
      if (VA.needsCustom()) {
        assert(
            VA.getValVT() == MVT::v64i1 &&
            "Currently the only custom case is when we split v64i1 to 2 regs");

        // v64i1 values, in regcall calling convention, that are
        // compiled to 32-bit arch, are split up into two registers.
        ArgValue =
            getv64i1Argument(VA, ArgLocs[++I], Chain, DAG, dl, Subtarget);
      } else {
        const TargetRegisterClass *RC;
        if (RegVT == MVT::i8)
          RC = &X86::GR8RegClass;
        else if (RegVT == MVT::i16)
          RC = &X86::GR16RegClass;
        else if (RegVT == MVT::i32)
          RC = &X86::GR32RegClass;
        else if (Is64Bit && RegVT == MVT::i64)
          RC = &X86::GR64RegClass;
        else if (RegVT == MVT::f16)
          RC = Subtarget.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
        else if (RegVT == MVT::f32)
          RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
        else if (RegVT == MVT::f64)
          RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
        else if (RegVT == MVT::f80)
          RC = &X86::RFP80RegClass;
        else if (RegVT == MVT::f128)
          RC = &X86::VR128RegClass;
        else if (RegVT.is512BitVector())
          RC = &X86::VR512RegClass;
        else if (RegVT.is256BitVector())
          RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
        else if (RegVT.is128BitVector())
          RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
        else if (RegVT == MVT::x86mmx)
          RC = &X86::VR64RegClass;
        else if (RegVT == MVT::v1i1)
          RC = &X86::VK1RegClass;
        else if (RegVT == MVT::v8i1)
          RC = &X86::VK8RegClass;
        else if (RegVT == MVT::v16i1)
          RC = &X86::VK16RegClass;
        else if (RegVT == MVT::v32i1)
          RC = &X86::VK32RegClass;
        else if (RegVT == MVT::v64i1)
          RC = &X86::VK64RegClass;
        else
          llvm_unreachable("Unknown argument type!");

        Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
        ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
      }

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits. Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      // ...

      if (VA.getLocInfo() == CCValAssign::BCvt &&
          ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
           (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
        // ...
      } else
        // ...
    } else {
      assert(VA.isMemLoc());
      ArgValue =
          LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
    }

    // If the value is passed via a pointer, do a load.
    if (VA.getLocInfo() == CCValAssign::Indirect &&
        !(Ins[I].Flags.isByVal() && VA.isRegLoc())) {
      ArgValue =
          DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());
    }

    InVals.push_back(ArgValue);
  }

  for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
    if (Ins[I].Flags.isSwiftAsync()) {
      auto X86FI = MF.getInfo<X86MachineFunctionInfo>();
      if (/* ... */)
        X86FI->setHasSwiftAsyncContext(true);
      else {
        int PtrSize = Subtarget.is64Bit() ? 8 : 4;
        int FI =
            MF.getFrameInfo().CreateStackObject(PtrSize, Align(PtrSize), false);
        X86FI->setSwiftAsyncContextFrameIdx(FI);
        SDValue St = DAG.getStore(
            DAG.getEntryNode(), dl, InVals[I],
            DAG.getFrameIndex(FI, PtrSize == 8 ? MVT::i64 : MVT::i32),
            MachinePointerInfo::getFixedStack(MF, FI));
        Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, St, Chain);
      }
    }
  }

  // ...
  for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
    if (/* ... */)
      continue;

    // All x86 ABIs require that for returning structs by value we copy the
    // sret argument into %rax/%eax (depending on ABI) for the return. Save
    // the argument into a virtual register so that we can access it from the
    // return points.
    if (Ins[I].Flags.isSRet()) {
      assert(!FuncInfo->getSRetReturnReg() &&
             "SRet return has already been set");
      // ...
      break;
    }
  }

  unsigned StackSize = CCInfo.getStackSize();

  // Align stack specially for tail calls.
  if (shouldGuaranteeTCO(CallConv,
                         MF.getTarget().Options.GuaranteedTailCallOpt))
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);

  if (IsVarArg)
    VarArgsLoweringHelper(FuncInfo, dl, DAG, Subtarget, CallConv, CCInfo)
        .lowerVarArgsParameters(Chain, StackSize);

  // Record how many bytes the callee pops on return, if any.
  if (/* ... */) {
    // ...
  } else {
    // ...
  }

  if (!Is64Bit) {
    // RegSaveFrameIndex is X86-64 only.
    // ...
  }

  FuncInfo->setArgumentStackSize(StackSize);

  // Set up Windows EH state numbers if this function uses a funclet-based
  // personality.
  if (/* ... */) {
    // ...
    EHInfo->PSPSymFrameIdx = PSPSymFI;
  }

  if (shouldDisableArgRegFromCSR(CallConv) ||
      F.hasFnAttribute("no_caller_saved_registers")) {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    for (std::pair<MCRegister, Register> Pair : MRI.liveins())
      MRI.disableCalleeSavedRegister(Pair.first);
  }

  if (CallingConv::PreserveNone == CallConv)
    for (const ISD::InputArg &In : Ins) {
      if (In.Flags.isSwiftSelf() || In.Flags.isSwiftAsync() ||
          In.Flags.isSwiftError()) {
        errorUnsupported(DAG, dl,
                         "Swift attributes can't be used with preserve_none");
        break;
      }
    }

  return Chain;
}

SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
                                            SDValue Arg, const SDLoc &dl,
                                            SelectionDAG &DAG,
                                            const CCValAssign &VA,
                                            ISD::ArgFlagsTy Flags,
                                            bool isByVal) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
                       StackPtr, PtrOff);
  if (isByVal)
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);

  MaybeAlign Alignment;
  if (Subtarget.isTargetWindowsMSVC() && !Subtarget.is64Bit() &&
      /* ... */)
    Alignment = MaybeAlign(4);
  return DAG.getStore(
      Chain, dl, Arg, PtrOff,
      MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset),
      Alignment);
}

/// Emit a load of the return address if tail call optimization is performed
/// and it is required (FPDiff!=0).
SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
    SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall,
    bool Is64Bit, int FPDiff, const SDLoc &dl) const {
  // Adjust the return address stack slot.
  // ...

  // Load the "old" return address.
  OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo());
  return SDValue(OutRetAddr.getNode(), 1);
}

/// Emit a store of the return address if tail call optimization is performed
/// and it is required (FPDiff!=0).
static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
                                        SDValue Chain, SDValue RetAddrFrIdx,
                                        EVT PtrVT, unsigned SlotSize,
                                        int FPDiff, const SDLoc &dl) {
  // Store the return address to the appropriate stack slot.
  if (!FPDiff) return Chain;
  // Calculate the new stack slot for the return address.
  int NewReturnAddrFI =
      MF.getFrameInfo().CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
                                          false);
  SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
  Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
                       MachinePointerInfo::getFixedStack(
                           DAG.getMachineFunction(), NewReturnAddrFI));
  return Chain;
}
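
// --- Illustrative sketch (not part of the LLVM source) ----------------------
// FPDiff bookkeeping, modeled standalone: when a guaranteed tail call needs
// more (or less) argument stack than its caller provided, the return address
// must be moved by the difference before jumping. This mirrors the
// "FPDiff = NumBytesCallerPushed - NumBytes" computation in LowerCall below.
#include <cassert>

static int returnAddrDelta(unsigned CallerPushedBytes, unsigned CalleeBytes) {
  return static_cast<int>(CallerPushedBytes) - static_cast<int>(CalleeBytes);
}

int main() {
  // Caller reserved 32 bytes of argument space, callee needs 48: the return
  // address slides down 16 bytes (FPDiff = -16) and is re-stored there.
  assert(returnAddrDelta(32, 48) == -16);
  // Equal frames: FPDiff == 0 and EmitTailCallStoreRetAddr is a no-op.
  assert(returnAddrDelta(32, 32) == 0);
}
// -----------------------------------------------------------------------------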

/// Return a vector shuffle with a MOVL-style mask: element zero is taken from
/// the second operand, the remaining elements from the first.
static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
                       SDValue V2) {
  unsigned NumElems = VT.getVectorNumElements();
  SmallVector<int, 8> Mask;
  Mask.push_back(NumElems);
  for (unsigned i = 1; i != NumElems; ++i)
    Mask.push_back(i);
  // ...
}
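
// --- Illustrative sketch (not part of the LLVM source) ----------------------
// The mask getMOVL builds, shown standalone: shuffle indices >= NumElems
// select from the second vector, so lane 0 comes from V2 and lanes 1..N-1
// stay from V1 (a MOVSS/MOVSD-style low-element insert).
#include <cassert>
#include <vector>

static std::vector<int> movlMask(unsigned NumElems) {
  std::vector<int> Mask;
  Mask.push_back(NumElems);  // lane 0 <- V2[0]
  for (unsigned i = 1; i != NumElems; ++i)
    Mask.push_back(i);       // lane i <- V1[i]
  return Mask;
}

int main() {
  std::vector<int> M = movlMask(4);
  assert((M == std::vector<int>{4, 1, 2, 3}));
}
// -----------------------------------------------------------------------------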

SDValue
X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  CallingConv::ID CallConv = CLI.CallConv;
  bool &isTailCall = CLI.IsTailCall;
  bool isVarArg = CLI.IsVarArg;
  const auto *CB = CLI.CB;

  MachineFunction &MF = DAG.getMachineFunction();
  bool Is64Bit = Subtarget.is64Bit();
  bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
  bool IsSibcall = false;
  // ...
  bool IsCalleePopSRet = !IsGuaranteeTCO && hasCalleePopSRet(Outs, Subtarget);
  X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
  bool HasNCSR = (CB && isa<CallInst>(CB) &&
                  CB->hasFnAttr("no_caller_saved_registers"));
  bool IsIndirectCall = (CB && isa<CallInst>(CB) && CB->isIndirectCall());
  bool IsCFICall = IsIndirectCall && CLI.CFIType;
  const Module *M = MF.getFunction().getParent();

  // If the indirect call carries the nocf_check attribute and CF protection
  // is enabled, it is lowered to a NOTRACK call and must not be tail called.
  bool IsNoTrackIndirectCall = IsIndirectCall && CB->doesNoCfCheck() &&
                               M->getModuleFlag("cf-protection-branch");
  if (IsNoTrackIndirectCall)
    isTailCall = false;

  MachineFunction::CallSiteInfo CSInfo;
  // ...
    CSInfo = MachineFunction::CallSiteInfo(*CB);

  if (IsIndirectCall && !IsWin64 &&
      M->getModuleFlag("import-call-optimization"))
    errorUnsupported(DAG, dl,
                     "Indirect calls must have a normal calling convention if "
                     "Import Call Optimization is enabled");

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

  // Allocate shadow area for Win64.
  if (IsWin64)
    CCInfo.AllocateStack(32, Align(8));

  CCInfo.AnalyzeArguments(Outs, CC_X86);

  // In vectorcall calling convention a second pass is required for the HVA
  // types.
  if (CallingConv::X86_VectorCall == CallConv) {
    CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);
  }

  if (Subtarget.isPICStyleGOT() && !IsGuaranteeTCO && !IsMustTail) {
    // If we are using a GOT, disable tail calls to external symbols with
    // default visibility. Tail calling such a symbol requires using a GOT
    // relocation, which forces early binding of the symbol. This breaks code
    // that requires lazy function symbol resolution. Using musttail or
    // GuaranteedTailCallOpt will override this.
    GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
    if (!G || (G->getGlobal()->hasLocalLinkage() &&
               G->getGlobal()->hasDefaultVisibility()))
      isTailCall = false;
  }

  if (isTailCall && !IsMustTail) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(CLI, CCInfo, ArgLocs,
                                                   IsCalleePopSRet);

    // Sibcalls are automatically detected tailcalls which do not require
    // ABI changes.
    if (!IsGuaranteeTCO && isTailCall)
      IsSibcall = true;

    if (isTailCall)
      ++NumTailCalls;
  }

  if (IsMustTail && !isTailCall)
    report_fatal_error("failed to perform tail call elimination on a call "
                       "site marked musttail");

  assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
         "Var args not supported with calling convention fastcc, ghc or hipe");

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
  if (IsSibcall)
    // This is a sibcall: the memory operands are available in the caller's
    // own stack.
    NumBytes = 0;
  else if (/* ... */)
    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);

  int FPDiff = 0;
  if (isTailCall &&
      /* ... */) {
    // Lower arguments at fp - stackoffset + fpdiff.
    // ...
    FPDiff = NumBytesCallerPushed - NumBytes;

    // Set the delta of movement of the return address stack slot, but only if
    // the delta is greater than any previous delta.
    if (FPDiff < X86Info->getTCReturnAddrDelta())
      X86Info->setTCReturnAddrDelta(FPDiff);
  }

  unsigned NumBytesToPush = NumBytes;
  unsigned NumBytesToPop = NumBytes;

  // If we have an inalloca argument, all stack space has already been
  // allocated for us and is at the top of the stack. We don't support
  // multiple memory arguments with inalloca.
  if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
    NumBytesToPush = 0;
    if (!ArgLocs.back().isMemLoc())
      report_fatal_error("cannot use inalloca attribute on a register "
                         "parameter");
    if (ArgLocs.back().getLocMemOffset() != 0)
      report_fatal_error("any parameter with the inalloca attribute must be "
                         "the only memory argument");
  } else if (/* ... */) {
    assert(ArgLocs.back().isMemLoc() &&
           "cannot use preallocated attribute on a register "
           "parameter");
    SmallVector<size_t, 4> PreallocatedOffsets;
    for (size_t i = 0; i < CLI.OutVals.size(); ++i) {
      if (CLI.CB->paramHasAttr(i, Attribute::Preallocated)) {
        PreallocatedOffsets.push_back(ArgLocs[i].getLocMemOffset());
      }
    }
    auto *MFI = DAG.getMachineFunction().getInfo<X86MachineFunctionInfo>();
    size_t PreallocatedId = MFI->getPreallocatedIdForCallSite(CLI.CB);
    MFI->setPreallocatedStackSize(PreallocatedId, NumBytes);
    MFI->setPreallocatedArgOffsets(PreallocatedId, PreallocatedOffsets);
    NumBytesToPush = 0;
  }

  if (!IsSibcall && !IsMustTail)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
                                 NumBytes - NumBytesToPush, dl);

  SDValue RetAddrFrIdx;
  // Load return address for tail calls.
  if (isTailCall && FPDiff)
    Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
                                    Is64Bit, FPDiff, dl);

  SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;

  // The next loop assumes that the locations are in the same order as the
  // input arguments.
  assert(isSortedByValueNo(ArgLocs) &&
         "Argument Location list must be sorted before lowering");

  // Walk the register/memloc assignments, inserting copies/loads. In the case
  // of tail call optimization, arguments are handled later.
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
       ++I, ++OutIndex) {
    assert(OutIndex < Outs.size() && "Invalid Out index");
    // Skip inalloca/preallocated arguments; they have already been written.
    ISD::ArgFlagsTy Flags = Outs[OutIndex].Flags;
    if (Flags.isInAlloca() || Flags.isPreallocated())
      continue;

    CCValAssign &VA = ArgLocs[I];
    SDValue Arg = OutVals[OutIndex];
    bool isByVal = Flags.isByVal();

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    // ...
      break;
    // ...
      break;
    // ...
        // Special case: passing MMX values in XMM registers.
        Arg = DAG.getBitcast(MVT::i64, Arg);
        // ...
        Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
      } else
        // ...
      break;
    // ...
      break;
    case CCValAssign::Indirect: {
      if (isByVal) {
        // Memcpy the argument to a temporary stack slot to prevent
        // the caller from seeing any modifications the callee makes,
        // as guaranteed.
        int FrameIdx = MF.getFrameInfo().CreateStackObject(
            Flags.getByValSize(),
            std::max(Align(16), Flags.getNonZeroByValAlign()), false);
        // ...
        Chain =
            CreateCopyOfByValArgument(Arg, StackSlot, Chain, Flags, DAG, dl);

        // From now on treat this as a regular pointer.
        Arg = StackSlot;
        isByVal = false;
      } else {
        // Store the argument in a spill slot and pass its address.
        // ...
        Chain = DAG.getStore(
            Chain, dl, Arg, SpillSlot,
            /* ... */);
        Arg = SpillSlot;
      }
      break;
    }
    }

    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::v64i1 &&
             "Currently the only custom case is when we split v64i1 to 2 regs");
      // Split v64i1 value into two registers.
      Passv64i1ArgInRegs(dl, DAG, Arg, RegsToPass, VA, ArgLocs[++I], Subtarget);
    } else if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      // ...
      if (Options.EmitCallSiteInfo)
        // ...
      if (isVarArg && IsWin64) {
        // Win64 ABI requires an argument XMM reg to be copied to the
        // corresponding shadow GPR if the callee is a varargs function.
        Register ShadowReg;
        switch (VA.getLocReg()) {
        case X86::XMM0: ShadowReg = X86::RCX; break;
        case X86::XMM1: ShadowReg = X86::RDX; break;
        case X86::XMM2: ShadowReg = X86::R8; break;
        case X86::XMM3: ShadowReg = X86::R9; break;
        }
        if (ShadowReg)
          RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
      }
    } else if (!IsSibcall && (!isTailCall || isByVal)) {
      assert(VA.isMemLoc());
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
                                      getPointerTy(DAG.getDataLayout()));
      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags, isByVal));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  if (Subtarget.isPICStyleGOT()) {
    // ELF / PIC requires the GOT base in EBX before function calls via PLT
    // GOT pointer.
    if (!isTailCall) {
      // ...
      RegsToPass.push_back(std::make_pair(
          Register(X86::EBX), DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
                                          getPointerTy(DAG.getDataLayout()))));
    } else {
      // ...
      GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
      if (G && G->getGlobal()->hasLocalLinkage() &&
          G->getGlobal()->hasDefaultVisibility())
        Callee = LowerGlobalAddress(Callee, DAG);
      else if (isa<ExternalSymbolSDNode>(Callee))
        Callee = LowerExternalSymbol(Callee, DAG);
    }
  }

  if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail &&
      (Subtarget.hasSSE1() || !M->getModuleFlag("SkipRaxSetup"))) {
    // From the AMD64 ABI document:
    // For calls that may call functions that use varargs or stdargs
    // (prototype-less calls or calls to functions containing ellipsis (...)
    // in the declaration) %al is used as a hidden argument to specify the
    // number of SSE registers used. The contents of %al do not need to match
    // exactly the number of registers, but must be an upper bound on the
    // number of SSE registers used and is in the range 0 - 8 inclusive.

    // Count the number of XMM registers allocated.
    static const MCPhysReg XMMArgRegs[] = {
      X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
      X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
    };
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
    assert((Subtarget.hasSSE1() || !NumXMMRegs)
           && "SSE registers cannot be used when SSE is disabled");
    RegsToPass.push_back(std::make_pair(Register(X86::AL),
                                        DAG.getConstant(NumXMMRegs, dl,
                                                        MVT::i8)));
  }

  if (isVarArg && IsMustTail) {
    const auto &Forwards = X86Info->getForwardedMustTailRegParms();
    for (const auto &F : Forwards) {
      SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
      RegsToPass.push_back(std::make_pair(F.PReg, Val));
    }
  }

  // For tail calls, lower the arguments to the 'real' stack slots. Sibcalls
  // don't need this because the eligibility check rejects calls that require
  // shuffling arguments passed in memory.
  if (!IsSibcall && isTailCall) {
    // Force all incoming stack arguments to be loaded from the stack before
    // any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots.
    // ...

    SmallVector<SDValue, 8> MemOpChains2;
    SDValue FIN;
    int FI = 0;
    for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E;
         ++I, ++OutsIndex) {
      CCValAssign &VA = ArgLocs[I];

      if (VA.isRegLoc()) {
        if (VA.needsCustom()) {
          assert((CallConv == CallingConv::X86_RegCall) &&
                 "Expecting custom case only in regcall calling convention");
          // This means that we are in a special case where one argument was
          // passed through two register locations - skip the next location.
          ++I;
        }

        continue;
      }

      assert(VA.isMemLoc());
      SDValue Arg = OutVals[OutsIndex];
      ISD::ArgFlagsTy Flags = Outs[OutsIndex].Flags;

      if (Flags.isInAlloca() || Flags.isPreallocated())
        continue;

      // Create a frame index for the tail-call argument slot.
      uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
      // ...

      if (Flags.isByVal()) {
        // Copy relative to the frame pointer.
        // ...
        Source = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
                             StackPtr, Source);

        MemOpChains2.push_back(
            CreateCopyOfByValArgument(Source, FIN, ArgChain, Flags, DAG, dl));
      } else {
        // Store relative to the frame pointer.
        MemOpChains2.push_back(DAG.getStore(
            Chain, dl, Arg, FIN,
            /* ... */));
      }
    }

    if (!MemOpChains2.empty())
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);

    // Store the return address to the appropriate stack slot.
    Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
                                     getPointerTy(DAG.getDataLayout()),
                                     RegInfo->getSlotSize(), FPDiff, dl);
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and glue operands which copy the outgoing args into registers.
  SDValue InGlue;
  for (const auto &[Reg, N] : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, dl, Reg, N, InGlue);
    InGlue = Chain.getValue(1);
  }

  bool IsImpCall = false;
  if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
    assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
    // In the 64-bit large code model, all calls have to go through a
    // register, since the call instruction's 32-bit pc-relative offset may
    // not be large enough to hold the whole address.
  } else if (Callee->getOpcode() == ISD::GlobalAddress ||
             Callee->getOpcode() == ISD::ExternalSymbol) {
    // Lower direct calls to global addresses and external symbols. Setting
    // ForCall to true here has the effect of removing WrapperRIP when
    // possible, to allow direct calls to be selected without first
    // materializing the address into a register.
    Callee = LowerGlobalOrExternal(Callee, DAG, /*ForCall=*/true, &IsImpCall);
  } else if (Subtarget.isTarget64BitILP32() &&
             Callee.getValueType() == MVT::i32) {
    // Zero-extend the 32-bit callee address to 64 bits, per the x32 ABI.
    Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
  }

  SmallVector<SDValue, 8> Ops;

  if (!IsSibcall && isTailCall && !IsMustTail) {
    Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, 0, InGlue, dl);
    InGlue = Chain.getValue(1);
  }

  Ops.push_back(Chain);
  Ops.push_back(Callee);

  if (isTailCall)
    Ops.push_back(DAG.getTargetConstant(FPDiff, dl, MVT::i32));

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (const auto &[Reg, N] : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg, N.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const uint32_t *Mask = [&]() {
    auto AdaptedCC = CallConv;
    // If HasNCSR is asserted (attribute NoCallerSavedRegisters exists),
    // use the X86_INTR calling convention because it has the same CSR mask
    // (same preserved registers).
    if (HasNCSR)
      AdaptedCC = (CallingConv::ID)CallingConv::X86_INTR;
    // If NoCalleeSavedRegisters is requested, use GHC since it happens
    // to use the CSR_NoRegs_RegMask.
    if (CB && CB->hasFnAttr("no_callee_saved_registers"))
      AdaptedCC = (CallingConv::ID)CallingConv::GHC;
    // ...
  }();
  assert(Mask && "Missing call preserved mask for calling convention");

  // ...

  // Define a new register mask from the existing mask if needed.
  uint32_t *RegMask = nullptr;

  // In some calling conventions we need to remove the used physical registers
  // from the reg mask. Create a new RegMask for such calling conventions.
  // ...
  if (/* ... */) {
    const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

    // Allocate a new RegMask and copy Mask.
    // ...
    memcpy(RegMask, Mask, sizeof(RegMask[0]) * RegMaskSize);

    // Make sure all subregisters of the argument registers are reset in the
    // RegMask.
    if (ShouldDisableArgRegs) {
      for (auto const &RegPair : RegsToPass)
        for (MCPhysReg SubReg : TRI->subregs_inclusive(RegPair.first))
          RegMask[SubReg / 32] &= ~(1u << (SubReg % 32));
    }

    // ...
  } else {
    // Use the static mask as-is.
    // ...
  }

  if (InGlue.getNode())
    Ops.push_back(InGlue);

  if (isTailCall) {
    // Liveouts should be computed from returns, not tail calls; do not add
    // the regs to the liveout set here.
    // ...
    SDValue Ret = DAG.getNode(X86ISD::TC_RETURN, dl, MVT::Other, Ops);

    if (IsCFICall)
      Ret.getNode()->setCFIType(CLI.CFIType->getZExtValue());

    // ...
    return Ret;
  }

  // Returns a chain & a glue for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsImpCall) {
    // ...
  } else if (IsNoTrackIndirectCall) {
    // ...
  } else if (/* ... */) {
    // Calls with a "clang.arc.attachedcall" bundle are special: they are
    // expanded to the call, directly followed by a special marker sequence
    // and a call to an ObjC library function.
    assert(!isTailCall &&
           "tail calls cannot be marked with clang.arc.attachedcall");
    assert(Is64Bit && "clang.arc.attachedcall is only supported in 64bit mode");

    // Add a target global address for the retainRV/claimRV runtime function
    // just before the call target.
    // ...
    Ops.insert(Ops.begin() + 1, GA);
    // ...
  } else {
    // ...
  }

  if (IsCFICall)
    Chain.getNode()->setCFIType(CLI.CFIType->getZExtValue());

  // ...

  // Save heapallocsite metadata.
  if (CLI.CB)
    if (MDNode *HeapAlloc = CLI.CB->getMetadata("heapallocsite"))
      // ...

  // Create the CALLSEQ_END node.
  unsigned NumBytesForCalleeToPop = 0; // Callee pops nothing.
  if (/* ... */)
    NumBytesForCalleeToPop = NumBytes;  // Callee pops everything.
  else if (/* ... */)
    // If this call passes a struct-return pointer, the callee
    // pops that struct pointer.
    NumBytesForCalleeToPop = 4;

  // Returns a glue for retval copy to use.
  if (!IsSibcall) {
    Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, NumBytesForCalleeToPop,
                               InGlue, dl);
    // ...
  }

  if (CallingConv::PreserveNone == CallConv)
    for (const ISD::OutputArg &Out : Outs) {
      if (Out.Flags.isSwiftSelf() || Out.Flags.isSwiftAsync() ||
          Out.Flags.isSwiftError()) {
        errorUnsupported(DAG, dl,
                         "Swift attributes can't be used with preserve_none");
        break;
      }
    }

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, dl, DAG,
                         InVals, RegMask);
}

//===----------------------------------------------------------------------===//
//                Fast Calling Convention (tail call) implementation
//===----------------------------------------------------------------------===//

//  Like std call, callee cleans arguments, convention except that ECX is
//  reserved for storing the tail called function address. Only 2 registers are
//  free for argument passing (inreg). Tail call optimization is performed
//  provided:
//                * tailcallopt is enabled
//                * caller/callee are fastcc
//  On X86_64 architecture with GOT-style position independent code only local
//  (within module) calls are supported at the moment.
//  To keep the stack aligned according to platform abi the function
//  GetAlignedArgumentStackSize ensures that argument delta is always multiples
//  of stack alignment. (Dynamic linkers need this - Darwin's dyld for example)
//  If a tail called function callee has more arguments than the caller the
//  caller needs to make sure that there is room to move the RETADDR to. This
//  is achieved by reserving an area the size of the argument delta right after
//  the original RETADDR, but before the saved framepointer or the spilled
//  registers, e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4):
//    arg1
//    arg2
//    RETADDR
//    [ new RETADDR
//      move area ]
//    (possible EBP)
//    ESI
//    EDI
//    local1 ..

/// Make the stack size align e.g. 16n + 12 aligned for a 16-byte alignment
/// requirement.
unsigned
X86TargetLowering::GetAlignedArgumentStackSize(const unsigned StackSize,
                                               SelectionDAG &DAG) const {
  const Align StackAlignment = Subtarget.getFrameLowering()->getStackAlign();
  const uint64_t SlotSize = Subtarget.getRegisterInfo()->getSlotSize();
  assert(StackSize % SlotSize == 0 &&
         "StackSize must be a multiple of SlotSize");
  return alignTo(StackSize + SlotSize, StackAlignment) - SlotSize;
}
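
// --- Illustrative sketch (not part of the LLVM source) ----------------------
// The same arithmetic as GetAlignedArgumentStackSize, runnable standalone:
// reserve a slot for the return address, round up to the stack alignment,
// then take the slot back, so (args + return address) stays stack-aligned.
#include <cassert>
#include <cstdint>

static uint64_t alignedArgStackSize(uint64_t StackSize, uint64_t SlotSize,
                                    uint64_t StackAlign) {
  uint64_t Padded = ((StackSize + SlotSize + StackAlign - 1) / StackAlign) *
                    StackAlign; // alignTo(StackSize + SlotSize, StackAlign)
  return Padded - SlotSize;
}

int main() {
  // 64-bit: 8-byte slots, 16-byte stack alignment.
  assert(alignedArgStackSize(0, 8, 16) == 8);   // keeps RSP+8 compatible
  assert(alignedArgStackSize(24, 8, 16) == 24); // already compatible
  assert(alignedArgStackSize(16, 8, 16) == 24); // padded up
}
// -----------------------------------------------------------------------------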

/// Return true if the given stack call argument is already available in the
/// same position (relatively) of the caller's incoming argument stack.
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
                         MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
                         const X86InstrInfo *TII, const CCValAssign &VA) {
  unsigned Bytes = Arg.getValueSizeInBits() / 8;

  for (;;) {
    // Look through nodes that don't alter the bits of the incoming value.
    // ...
      continue;
    }
    // ...
      continue;
    }
    // ...
    break;
  }

  int FI = INT_MAX;
  if (Arg.getOpcode() == ISD::CopyFromReg) {
    // ...
      return false;
    MachineInstr *Def = MRI->getVRegDef(VR);
    if (!Def)
      return false;
    if (!Flags.isByVal()) {
      if (!TII->isLoadFromStackSlot(*Def, FI))
        return false;
    } else {
      unsigned Opcode = Def->getOpcode();
      if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
           Opcode == X86::LEA64_32r) &&
          Def->getOperand(1).isFI()) {
        FI = Def->getOperand(1).getIndex();
        Bytes = Flags.getByValSize();
      } else
        return false;
    }
  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
    if (Flags.isByVal())
      // A byval argument is passed in as a pointer but it's now being
      // dereferenced, e.g.:
      //   define @foo(%struct.X* %A) {
      //     tail call @bar(%struct.X* byval %A)
      //   }
      return false;
    SDValue Ptr = Ld->getBasePtr();
    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
    if (!FINode)
      return false;
    FI = FINode->getIndex();
  } else if (/* ... */) {
    // ...
    Bytes = Flags.getByValSize();
  } else
    return false;

  assert(FI != INT_MAX);
  if (!MFI.isFixedObjectIndex(FI))
    return false;

  if (Offset != MFI.getObjectOffset(FI))
    return false;

  // If this is not byval, check that the argument stack object is immutable.
  // inalloca and argument copy elision can create mutable argument stack
  // objects.
  // ...
  if (!Flags.isByVal() && !MFI.isImmutableObjectIndex(FI))
    return false;

  if (VA.getLocVT().getFixedSizeInBits() >
      Arg.getValueSizeInBits().getFixedValue()) {
    // If the argument location is wider than the argument type, check that
    // any extension flags match.
    if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
        Flags.isSExt() != MFI.isObjectSExt(FI))
      return false;
  }

  return Bytes == MFI.getObjectSize(FI);
}

static bool
mayBeSRetTailCallCompatible(const TargetLowering::CallLoweringInfo &CLI,
                            Register CallerSRetReg) {
  const auto &Outs = CLI.Outs;
  const auto &OutVals = CLI.OutVals;

  // Locate the operand index within the callee's arguments that carries an
  // sret pointer, if any.
  unsigned Pos = 0;
  for (unsigned E = Outs.size(); Pos != E; ++Pos)
    if (Outs[Pos].Flags.isSRet())
      break;

  // Bail out if the callee has no sret argument.
  if (Pos == Outs.size())
    return false;

  // The tail call is sret-compatible only if the callee is being passed the
  // caller's own sret pointer (or a copy of it).
  SDValue SRetArgVal = OutVals[Pos];
  // ...
      continue;
    // ...
      return true;
  }

  return false;
}

/// Check whether the call is eligible for tail call optimization. Targets
/// that want to do tail call optimization should implement this function.
/// Note that the x86 backend does not check musttail calls for eligibility!
bool X86TargetLowering::IsEligibleForTailCallOptimization(
    TargetLowering::CallLoweringInfo &CLI, CCState &CCInfo,
    SmallVectorImpl<CCValAssign> &ArgLocs, bool IsCalleePopSRet) const {
  SelectionDAG &DAG = CLI.DAG;
  const SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  const SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  const SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Callee = CLI.Callee;
  CallingConv::ID CalleeCC = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  if (!mayTailCallThisCC(CalleeCC))
    return false;

  // If -tailcallopt is specified, make fastcc functions tail-callable.
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  const Function &CallerF = MF.getFunction();

  // If the function return type is x86_fp80 and the callee return type is
  // not, then the FP_EXTEND of the call result is not a nop. It's not safe to
  // perform a tailcall optimization here.
  if (CallerF.getReturnType()->isX86_FP80Ty() && !CLI.RetTy->isX86_FP80Ty())
    return false;

  CallingConv::ID CallerCC = CallerF.getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;
  bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
  bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
  // ...

  // Win64 functions have extra shadow space for argument homing. Don't do the
  // sibcall if the caller and callee have mismatched expectations for this
  // space.
  if (IsCalleeWin64 != IsCallerWin64)
    return false;

  if (IsGuaranteeTCO) {
    if (canGuaranteeTCO(CalleeCC) && CCMatch)
      return true;
    return false;
  }

  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes. This is what gcc calls sibcall.

  // Can't do sibcall if the stack needs to be dynamically re-aligned. PEI
  // needs to emit a special epilogue.
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  if (RegInfo->hasStackRealignment(MF))
    return false;

  // Avoid sibcall optimization if we are an sret return function and the
  // callee is incompatible, or if the call site pops its own sret while the
  // caller doesn't.
  if (/* ... */) {
    // ...
    if (/* ... */)
      return false;
  } else if (IsCalleePopSRet)
    // The callee pops an sret, so we cannot tail-call, as our caller doesn't
    // expect that.
    return false;

  // Do not sibcall optimize vararg calls unless all arguments are passed via
  // registers.
  if (isVarArg && !Outs.empty()) {
    // Optimizing for varargs on Win64 is unlikely to be safe without
    // additional testing.
    if (IsCalleeWin64 || IsCallerWin64)
      return false;

    for (const auto &VA : ArgLocs)
      if (!VA.isRegLoc())
        return false;
  }

  // If the call result is in ST0/ST1, it needs to be popped off the x87
  // stack. Therefore, if it's not used by the call it is not safe to optimize
  // this into a sibcall.
  bool Unused = false;
  for (const auto &In : Ins) {
    if (!In.Used) {
      Unused = true;
      break;
    }
  }
  if (Unused) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState RVCCInfo(CalleeCC, false, MF, RVLocs, C);
    RVCCInfo.AnalyzeCallResult(Ins, RetCC_X86);
    for (const auto &VA : RVLocs) {
      if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
        return false;
    }
  }

  // Check that the call results are passed in the same way.
  if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins, RetCC_X86,
                                  RetCC_X86))
    return false;

  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (!CCMatch) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
      return false;
  }

  // The stack frame of the caller cannot be replaced by the tail-callee one's
  // if the function is required to preserve all the registers for the caller.
  if (CallerF.hasFnAttribute("no_caller_saved_registers"))
    return false;

  unsigned StackArgsSize = CCInfo.getStackSize();

  // If the callee takes no arguments then go on to check the results of the
  // call.
  if (!Outs.empty()) {
    if (StackArgsSize > 0) {
      // Check if the arguments are already laid out in the right way as
      // the caller's fixed stack objects.
      // ...
      const MachineRegisterInfo *MRI = &MF.getRegInfo();
      const X86InstrInfo *TII = Subtarget.getInstrInfo();
      for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
        const CCValAssign &VA = ArgLocs[I];
        ISD::ArgFlagsTy Flags = Outs[I].Flags;
        if (/* ... */)
          return false;
        if (!VA.isRegLoc() &&
            !MatchingStackOffset(OutVals[I], VA.getLocMemOffset(), Flags, MFI,
                                 MRI, TII, VA))
          return false;
      }
    }
  }

  // If the tailcall address may be in a register, then make sure it's
  // possible to register allocate for it. In 32-bit, the call address can
  // only target EAX, EDX, or ECX since the tail call must be scheduled after
  // callee-saved registers are restored. These happen to be the same
  // registers used to pass 'inreg' arguments, so watch out for those.
  if (!Subtarget.is64Bit() && (/* ... */ ||
                               PositionIndependent)) {
    unsigned NumInRegs = 0;
    // In PIC we need an extra register to formulate the address computation
    // for the callee.
    unsigned MaxInRegs = PositionIndependent ? 2 : 3;

    for (const auto &VA : ArgLocs) {
      if (!VA.isRegLoc())
        continue;
      Register Reg = VA.getLocReg();
      switch (Reg) {
      default: break;
      case X86::EAX: case X86::EDX: case X86::ECX:
        if (++NumInRegs == MaxInRegs)
          return false;
        break;
      }
    }
  }

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
    return false;

  bool CalleeWillPop =
      X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
                       MF.getTarget().Options.GuaranteedTailCallOpt);

  if (/* ... */) {
    // If we have bytes to pop, the callee must pop them.
    bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
    if (!CalleePopMatches)
      return false;
  } else if (CalleeWillPop && StackArgsSize > 0) {
    // If we don't have bytes to pop, make sure the callee doesn't pop any.
    return false;
  }

  return true;
}

/// Determines whether the callee is required to pop its own arguments.
/// Callee pop is necessary to support tail calls.
bool X86::isCalleePop(CallingConv::ID CallingConv,
                      bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
  // If GuaranteeTCO is true, we force some calls to be callee pop so that we
  // can guarantee TCO.
  if (!IsVarArg && shouldGuaranteeTCO(CallingConv, GuaranteeTCO))
    return true;

  switch (CallingConv) {
  default:
    return false;
  // ... (the 32-bit callee-pop conventions, e.g. stdcall/fastcall/thiscall)
  }
}
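
// --- Illustrative sketch (not part of the LLVM source) ----------------------
// A hedged model that merges the shapes of canGuaranteeTCO and
// shouldGuaranteeTCO: conventions designed for tail calls (e.g. GHC, HiPE,
// tailcc, swifttailcc) always guarantee TCO, while fastcc only does so under
// -tailcallopt. The exact case list in the file is not fully recovered above,
// so treat this purely as an approximation.
#include <cassert>

enum class CC { C, Fast, GHC, HiPE, Tail, SwiftTail };

static bool canGuaranteeTCOModel(CC Conv, bool GuaranteedTailCallOpt) {
  switch (Conv) {
  case CC::GHC:
  case CC::HiPE:
  case CC::Tail:
  case CC::SwiftTail:
    return true;
  case CC::Fast:
    return GuaranteedTailCallOpt;
  default:
    return false;
  }
}

int main() {
  assert(canGuaranteeTCOModel(CC::Tail, false));
  assert(!canGuaranteeTCOModel(CC::Fast, false));
  assert(canGuaranteeTCOModel(CC::Fast, true));
}
// -----------------------------------------------------------------------------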

unsigned const MachineRegisterInfo * MRI

static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls)

Return true if the calling convention is one that we can guarantee TCO for.

static bool mayTailCallThisCC(CallingConv::ID CC)

Return true if we might ever do TCO for calls with this calling convention.

assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")

const TargetInstrInfo & TII

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")

static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")

static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")

static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, const SDLoc &dl)

CreateCopyOfByValArgument - Make a copy of an aggregate at address specified by "Src" to address "Dst...

Module.h This file contains the declarations for the Module class.

const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]

const MCPhysReg ArgGPRs[]

static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt)

Return true if the function is being made into a tailcall target by changing its ABI.

static bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, MachineFrameInfo &MFI, const MachineRegisterInfo *MRI, const M68kInstrInfo *TII, const CCValAssign &VA)

Return true if the given stack call argument is already available in the same position (relatively) o...

Machine Check Debug Module

Register const TargetRegisterInfo * TRI

Promote Memory to Register

This file defines ARC utility functions which are used by various parts of the compiler.

static CodeModel::Model getCodeModel(const PPCSubtarget &S, const TargetMachine &TM, const MachineOperand &MO)

static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign)

getMaxByValAlign - Helper for getByValTypeAlignment to determine the desired ByVal argument alignment...

static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)

This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...

#define STATISTIC(VARNAME, DESC)

static Function * getFunction(FunctionType *Ty, const Twine &Name, Module *M)

static bool is64Bit(const char *name)

static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc, const SDLoc &DL, SelectionDAG &DAG)

Lowers masks values (v*i1) to the local register values.

Definition X86ISelLoweringCall.cpp:689

static void Passv64i1ArgInRegs(const SDLoc &DL, SelectionDAG &DAG, SDValue &Arg, SmallVectorImpl< std::pair< Register, SDValue > > &RegsToPass, CCValAssign &VA, CCValAssign &NextVA, const X86Subtarget &Subtarget)

Breaks v64i1 value into two registers and adds the new node to the DAG.

Definition X86ISelLoweringCall.cpp:720

static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA, SDValue &Root, SelectionDAG &DAG, const SDLoc &DL, const X86Subtarget &Subtarget, SDValue *InGlue=nullptr)

Reads two 32 bit registers and creates a 64 bit mask value.

Definition X86ISelLoweringCall.cpp:1014

static ArrayRef< MCPhysReg > get64BitArgumentXMMs(MachineFunction &MF, CallingConv::ID CallConv, const X86Subtarget &Subtarget)

Definition X86ISelLoweringCall.cpp:1450

static bool isSortedByValueNo(ArrayRef< CCValAssign > ArgLocs)

Definition X86ISelLoweringCall.cpp:1476

static ArrayRef< MCPhysReg > get64BitArgumentGPRs(CallingConv::ID CallConv, const X86Subtarget &Subtarget)

Definition X86ISelLoweringCall.cpp:1432

static SDValue getPopFromX87Reg(SelectionDAG &DAG, SDValue Chain, const SDLoc &dl, Register Reg, EVT VT, SDValue Glue)

Definition X86ISelLoweringCall.cpp:1098

static bool mayBeSRetTailCallCompatible(const TargetLowering::CallLoweringInfo &CLI, Register CallerSRetReg)

Definition X86ISelLoweringCall.cpp:2775

static std::pair< MVT, unsigned > handleMaskRegisterForCallingConv(unsigned NumElts, CallingConv::ID CC, const X86Subtarget &Subtarget)

Definition X86ISelLoweringCall.cpp:70

static bool shouldDisableRetRegFromCSR(CallingConv::ID CC)

Returns true if a CC can dynamically exclude a register from the list of callee-saved-registers (Targ...

Definition X86ISelLoweringCall.cpp:51

static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl, const char *Msg)

Call this when the user attempts to do something unsupported, like returning a double without SSE2 en...

Definition X86ISelLoweringCall.cpp:41

static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue RetAddrFrIdx, EVT PtrVT, unsigned SlotSize, int FPDiff, const SDLoc &dl)

Emit a store of the return address if tail call optimization is performed and it is required (FPDiff!...

Definition X86ISelLoweringCall.cpp:1983

static bool hasCalleePopSRet(const SmallVectorImpl< T > &Args, const X86Subtarget &Subtarget)

Determines whether Args, either a set of outgoing arguments to a call, or a set of incoming args of a...

Definition X86ISelLoweringCall.cpp:1221

static bool shouldDisableArgRegFromCSR(CallingConv::ID CC)

Returns true if a CC can dynamically exclude a register from the list of callee-saved-registers (Targ...

Definition X86ISelLoweringCall.cpp:65

static bool hasStackGuardSlotTLS(const Triple &TargetTriple)

Definition X86ISelLoweringCall.cpp:548

static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT, const EVT &ValLoc, const SDLoc &DL, SelectionDAG &DAG)

The function will lower a register of various sizes (8/16/32/64) to a mask value of the expected size...

Definition X86ISelLoweringCall.cpp:1065

static Constant * SegmentOffset(IRBuilderBase &IRB, int Offset, unsigned AddressSpace)

Definition X86ISelLoweringCall.cpp:553

static bool isBitAligned(Align Alignment, uint64_t SizeInBits)

Definition X86ISelLoweringCall.cpp:353

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...

size_t size() const

size - Get the array size.

ArrayRef< T > slice(size_t N, size_t M) const

slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.

const Function * getParent() const

Return the enclosing method, or null if none.

CCState - This class holds information needed while lowering arguments and return values.

static LLVM_ABI bool resultsCompatible(CallingConv::ID CalleeCC, CallingConv::ID CallerCC, MachineFunction &MF, LLVMContext &C, const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn CalleeFn, CCAssignFn CallerFn)

Returns true if the results of the two calling conventions are compatible.

uint64_t getStackSize() const

Returns the size of the currently allocated portion of the stack.

CCValAssign - Represent assignment of one arg/retval to a location.

void convertToReg(MCRegister Reg)

Register getLocReg() const

LocInfo getLocInfo() const

int64_t getLocMemOffset() const

CallingConv::ID getCallingConv() const

LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const

Determine whether the argument or parameter has the given attribute.

LLVM_ABI bool isMustTailCall() const

Tests if this call site must be tail call optimized.

This class represents a function call, abstracting a target machine's calling convention.

static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)

uint64_t getZExtValue() const

Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...

This is an important base class in LLVM.

A parsed version of the target data layout string in and methods for querying it.

LLVM_ABI TypeSize getTypeAllocSize(Type *Ty) const

Returns the offset in bytes between successive objects of the specified type, including alignment pad...

Diagnostic information for unsupported feature in backend.

A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...

bool hasPersonalityFn() const

Check whether this function has a personality function.

Constant * getPersonalityFn() const

Get the personality function associated with this function.

bool hasFnAttribute(Attribute::AttrKind Kind) const

Return true if the function has the attribute.

Module * getParent()

Get the module that this global value is contained inside of...

void setDSOLocal(bool Local)

@ ExternalLinkage

Externally visible function.

Common base class shared among various IRBuilders.

BasicBlock * GetInsertBlock() const

LLVMContext & getContext() const

PointerType * getPtrTy(unsigned AddrSpace=0)

Fetch the type representing a pointer.

MDNode * getMetadata(unsigned KindID) const

Get the metadata of given kind attached to this Instruction.

This is an important class for using LLVM in a threaded context.

LLVM_ABI void diagnose(const DiagnosticInfo &DI)

Report a message to the currently installed diagnostic handler.

This class is used to represent ISD::LOAD nodes.

Context object for machine code objects.

Base class for the full range of assembler expressions which are needed for parsing.

static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())

@ INVALID_SIMPLE_VALUE_TYPE

unsigned getVectorNumElements() const

bool isVector() const

Return true if this is a vector value type.

bool is512BitVector() const

Return true if this is a 512-bit vector type.

TypeSize getSizeInBits() const

Returns the size of the specified MVT in bits.

uint64_t getFixedSizeInBits() const

Return the size of the specified fixed width value type in bits.

MVT getVectorElementType() const

MVT getScalarType() const

If this is a vector, return the element type, otherwise return this.

The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.

LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)

Create a new object at a fixed location on the stack.

void setObjectZExt(int ObjectIdx, bool IsZExt)

LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)

Create a new statically sized stack object, returning a nonnegative identifier to represent it.

void setObjectSExt(int ObjectIdx, bool IsSExt)

bool isImmutableObjectIndex(int ObjectIdx) const

Returns true if the specified index corresponds to an immutable object.

void setHasTailCall(bool V=true)

bool isObjectZExt(int ObjectIdx) const

int64_t getObjectSize(int ObjectIdx) const

Return the size of the specified object.

bool isObjectSExt(int ObjectIdx) const

int64_t getObjectOffset(int ObjectIdx) const

Return the assigned stack offset of the specified object from the incoming stack pointer.

bool isFixedObjectIndex(int ObjectIdx) const

Returns true if the specified index corresponds to a fixed stack object.

int getObjectIndexBegin() const

Return the minimum frame object index.

const WinEHFuncInfo * getWinEHFuncInfo() const

getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.

MCSymbol * getPICBaseSymbol() const

getPICBaseSymbol - Return a function-local symbol to represent the PIC base.

MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)

getMachineMemOperand - Allocate a new MachineMemOperand.

MachineFrameInfo & getFrameInfo()

getFrameInfo - Return the frame info object for the current function.

uint32_t * allocateRegMask()

Allocate and initialize a register mask with NumRegister bits.

MachineRegisterInfo & getRegInfo()

getRegInfo - Return information about the registers currently in use.

const DataLayout & getDataLayout() const

Return the DataLayout attached to the Module associated to this MF.

Function & getFunction()

Return the LLVM function that this machine code represents.

Ty * getInfo()

getInfo - Keep track of various per-function pieces of information for backends that would like to do so.

Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)

addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual register for it.
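
A sketch of the common pattern (helper name invented; the physreg and register class come from the calling convention):

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
using namespace llvm;

// Sketch: record an incoming argument physreg as live-in on entry and
// obtain the virtual register that argument lowering reads from.
static Register takeLiveIn(MachineFunction &MF, MCRegister PhysReg,
                           const TargetRegisterClass *RC) {
  return MF.addLiveIn(PhysReg, RC);
}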

const TargetMachine & getTarget() const

getTarget - Return the target machine this machine code is compiled with.

Representation of each machine instruction.

@ EK_Custom32

EK_Custom32 - Each entry is a 32-bit value that is custom lowered by the TargetLowering::LowerCustomJumpTableEntry hook.

@ EK_LabelDifference64

EK_LabelDifference64 - Each entry is the address of the block minus the address of the jump table.

A description of a memory reference used in the backend.

Flags

Flags values. These may be or'd together.

@ MOLoad

The memory access reads data.

@ MONonTemporal

The memory access is non-temporal.

@ MOStore

The memory access writes data.

static unsigned getRegMaskSize(unsigned NumRegs)

Returns the number of elements needed for a regmask array.

static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)

clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
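
A sketch of how a call's register mask is queried (helper name invented):

#include "llvm/CodeGen/MachineOperand.h"
using namespace llvm;

// Sketch: RegMask would typically come from
// TRI->getCallPreservedMask(MF, CC). A set bit in the mask means the
// register is preserved across the call; clobbersPhysReg reports the
// complement.
static bool callClobbers(const uint32_t *RegMask, MCRegister Reg) {
  return MachineOperand::clobbersPhysReg(RegMask, Reg);
}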

MachineRegisterInfo - Keep track of information for virtual and physical registers,...

LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")

createVirtualRegister - Create and return a new virtual register in the function with the specified register class.

LLVM_ABI void disableCalleeSavedRegister(MCRegister Reg)

Disables the register from the list of CSRs.

A Module instance is used to store all the information related to an LLVM module.

static PointerType * getUnqual(Type *ElementType)

This constructs a pointer to an object of the specified type in the default address space (address space zero).

Wrapper class representing virtual and physical registers.

constexpr bool isVirtual() const

Return true if the specified register number is in the virtual register namespace.

Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation functions.

const DebugLoc & getDebugLoc() const

Represents one node in the SelectionDAG.

void setCFIType(uint32_t Type)

iterator_range< user_iterator > users()

Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.

SDNode * getNode() const

Get the SDNode which holds the desired result.

SDValue getValue(unsigned R) const

EVT getValueType() const

Return the ValueType of the referenced return value.

TypeSize getValueSizeInBits() const

Returns the size of the value in bits.

const SDValue & getOperand(unsigned i) const

MVT getSimpleValueType() const

Return the simple ValueType of the referenced return value.

unsigned getOpcode() const

This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representation.

SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)

LLVM_ABI SDValue getStackArgumentTokenFactor(SDValue Chain)

Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack.

SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)

LLVM_ABI SDVTList getVTList(EVT VT)

Return an SDVTList that represents the list of values specified.

LLVM_ABI SDValue getRegister(Register Reg, EVT VT)

LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)

Loads are not normal binary operators: their result type is not determined by their operands,...

LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())

Creates a MemIntrinsicNode that may produce a result and takes a list of operands.

LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)

void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)

Set NoMergeSiteInfo to be associated with Node if NoMerge is true.

SDValue getUNDEF(EVT VT)

Return an UNDEF node. UNDEF does not have a useful SDLoc.

SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)

Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).

LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)

Return a bitcast using the SDLoc of the value operand, and casting to the provided type.

SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)

const DataLayout & getDataLayout() const

void addHeapAllocSite(const SDNode *Node, MDNode *MD)

Set HeapAllocSite to be associated with Node.

LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)

Create a ConstantSDNode wrapping a constant value.

SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)

LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())

Helper function to build ISD::STORE nodes.
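
A sketch chaining getLoad and getStore (helper name invented; Chain and the pointers come from the surrounding lowering context):

#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Sketch: copy one i32 through memory. The store is chained on the
// load's output chain (result #1), and Align(4) is an assumed natural
// alignment for i32.
static SDValue copyI32(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain,
                       SDValue SrcPtr, SDValue DstPtr) {
  SDValue Val =
      DAG.getLoad(MVT::i32, dl, Chain, SrcPtr, MachinePointerInfo());
  return DAG.getStore(Val.getValue(1), dl, Val, DstPtr,
                      MachinePointerInfo(), Align(4));
}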

SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)

Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...

const TargetMachine & getTarget() const

LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)

LLVM_ABI SDValue getValueType(EVT)

LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)

Gets or creates the specified node.

SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)

MachineFunction & getMachineFunction() const

LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)

LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)

void addCallSiteInfo(const SDNode *Node, CallSiteInfo &&CallInfo)

Set CallSiteInfo to be associated with Node.

LLVMContext * getContext() const

LLVM_ABI SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)

Create a stack temporary based on the size in bytes and the alignment.

SDValue getEntryNode() const

Return the token chain corresponding to the entry of the function.

LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)

Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.

LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)

Return an ISD::VECTOR_SHUFFLE node.

This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.

constexpr bool empty() const

empty - Check if the string is empty.

Class to represent struct types.

virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const

Return the register class that should be used for the specified value type.

virtual Value * getSafeStackPointerLocation(IRBuilderBase &IRB) const

Returns the target-specific address of the unsafe stack pointer.

const TargetMachine & getTargetMachine() const

virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const

Certain targets require unusual breakdowns of certain types.

virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const

Certain combinations of ABIs, Targets and features require that types are legal for some operations and not for other operations.

virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const

Certain targets such as MIPS require that some types such as vectors are always broken down into scal...

virtual Value * getIRStackGuard(IRBuilderBase &IRB) const

If the target has a standard location for the stack protector guard, returns the address of that location.

bool isTypeLegal(EVT VT) const

Return true if the target has native support for the specified value type.

virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const

Return the pointer type for the given address space, defaults to the pointer type from the data layout.

virtual std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const

Return the largest legal super-reg register class of the register class for the specified type and its associated cost.

RTLIB::LibcallImpl getLibcallImpl(RTLIB::Libcall Call) const

Get the libcall impl routine name for the specified libcall.

static StringRef getLibcallImplName(RTLIB::LibcallImpl Call)

Get the libcall routine name for the specified libcall implementation.

LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const

Return how we should legalize values of this type, either it is already legal (return 'Legal') or we need to promote it to a larger type (return 'Promote'), or we need to expand it into multiple registers of smaller integer type (return 'Expand').

std::vector< ArgListEntry > ArgListTy

MVT getRegisterType(MVT VT) const

Return the type of registers that this ValueType will eventually require.

virtual void insertSSPDeclarations(Module &M) const

Inserts necessary declarations for SSP (stack protection) purpose.

virtual const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const

This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase, but as an MCExpr.

bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const

Check whether parameters to a call that are passed in callee saved registers are the same as from the calling function.

bool isPositionIndependent() const

virtual ArrayRef< MCPhysReg > getRoundingControlRegisters() const

Returns a 0-terminated array of rounding control registers that can be attached to strict FP calls.

virtual unsigned getJumpTableEncoding() const

Return the entry encoding for a jump table in the current function.

CodeModel::Model getCodeModel() const

Returns the code model.

unsigned GuaranteedTailCallOpt

GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the command line.

unsigned EmitCallGraphSection

Emit section containing call graph metadata.

TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.

Triple - Helper class for working with autoconf configuration names.

bool isAndroid() const

Tests whether the target is Android.

bool isOSMSVCRT() const

Is this a "Windows" OS targeting a "MSVCRT.dll" environment.

bool isOSGlibc() const

Tests whether the OS uses glibc.

The instances of the Type class are immutable: once they are created, they are never changed.

static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)

bool isX86_FP80Ty() const

Return true if this is x86 long double.

static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)

static LLVM_ABI Type * getVoidTy(LLVMContext &C)

Value * getOperand(unsigned i) const

LLVM Value Representation.

void setBytesToPopOnReturn(unsigned bytes)

void setBPClobberedByCall(bool C)

void setFPClobberedByCall(bool C)

unsigned getVarArgsGPOffset() const

int getRegSaveFrameIndex() const

Register getSRetReturnReg() const

void setVarArgsGPOffset(unsigned Offset)

void setRegSaveFrameIndex(int Idx)

void setForceFramePointer(bool forceFP)

void setSRetReturnReg(Register Reg)

unsigned getVarArgsFPOffset() const

void setArgumentStackSize(unsigned size)

SmallVectorImpl< ForwardedRegister > & getForwardedMustTailRegParms()

void setTCReturnAddrDelta(int delta)

void setVarArgsFrameIndex(int Idx)

void setBPClobberedByInvoke(bool C)

void setFPClobberedByInvoke(bool C)

unsigned getBytesToPopOnReturn() const

void setVarArgsFPOffset(unsigned Offset)

const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override

Register getStackRegister() const

unsigned getSlotSize() const

Register getFramePtr() const

Returns physical register used as frame pointer.

Register getBaseRegister() const

const uint32_t * getNoPreservedMask() const override

const Triple & getTargetTriple() const

bool useAVX512Regs() const

bool isCallingConvWin64(CallingConv::ID CC) const

std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const override

Return the largest legal super-reg register class of the register class for the specified type and its associated cost.

Definition X86ISelLoweringCall.cpp:515

SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override

Returns relocation base for the given PIC jumptable.

Definition X86ISelLoweringCall.cpp:489

unsigned getJumpTableEncoding() const override

Return the entry encoding for a jump table in the current function.

Definition X86ISelLoweringCall.cpp:432

bool isMemoryAccessFast(EVT VT, Align Alignment) const

Definition X86ISelLoweringCall.cpp:357

bool useSoftFloat() const override

Definition X86ISelLoweringCall.cpp:446

const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const override

This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase, but as an MCExpr.

Definition X86ISelLoweringCall.cpp:502

bool isSafeMemOpType(MVT VT) const override

Returns true if it's safe to use load / store of the specified type to expand memcpy / memset inline.

Definition X86ISelLoweringCall.cpp:345

bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const override

For some targets, an LLVM struct type must be broken down into multiple simple types, but the calling convention specifies that the entire struct must be passed in a block of consecutive registers.

Definition X86ISelLoweringCall.cpp:237

Value * getIRStackGuard(IRBuilderBase &IRB) const override

If the target has a standard location for the stack protector cookie, returns the address of that location.

Definition X86ISelLoweringCall.cpp:560

Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const override

Return the desired alignment for ByVal aggregate function arguments in the caller parameter area.

Definition X86ISelLoweringCall.cpp:283

MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override

Certain combinations of ABIs, Targets and features require that types are legal for some operations and not for other operations.

Definition X86ISelLoweringCall.cpp:103

bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override

Returns true if the target allows unaligned memory accesses of the specified type.

Definition X86ISelLoweringCall.cpp:372

EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op, const AttributeList &FuncAttributes) const override

It returns EVT::Other if the type should be determined using generic target-independent logic.

Definition X86ISelLoweringCall.cpp:298

unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override

Certain targets such as MIPS require that some types such as vectors are always broken down into scal...

Definition X86ISelLoweringCall.cpp:175

void markLibCallAttributes(MachineFunction *MF, unsigned CC, ArgListTy &Args) const override

Definition X86ISelLoweringCall.cpp:450

Value * getSafeStackPointerLocation(IRBuilderBase &IRB) const override

Return true if the target stores SafeStack pointer at a fixed offset in some non-standard address space, and populates the address space and offset as appropriate.

Definition X86ISelLoweringCall.cpp:644

bool isScalarFPTypeInSSEReg(EVT VT) const

Return true if the specified scalar FP type is computed in an SSE register, not on the X87 floating point stack.

unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override

Certain targets require unusual breakdowns of certain types.

Definition X86ISelLoweringCall.cpp:139

bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override

This function returns true if the memory access is aligned or if the target allows this specific unaligned memory access.

Definition X86ISelLoweringCall.cpp:391

SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const

EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override

Return the value type to use for ISD::SETCC.

Definition X86ISelLoweringCall.cpp:208

EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const override

For types supported by the target, this is an identity function.

void insertSSPDeclarations(Module &M) const override

Inserts necessary declarations for SSP (stack protection) purpose.

Definition X86ISelLoweringCall.cpp:607

const MCExpr * LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB, unsigned uid, MCContext &Ctx) const override

Definition X86ISelLoweringCall.cpp:479

constexpr ScalarTy getFixedValue() const

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

constexpr char Align[]

Key for Kernel::Arg::Metadata::mAlign.

constexpr std::underlying_type_t< E > Mask()

Get a bitmask with 1s in all places up to the high-order bit of E's largest value.

CallingConv Namespace - This namespace contains an enum with a value for the well-known calling conventions.

unsigned ID

LLVM IR allows the use of arbitrary numbers as calling convention identifiers.

@ X86_64_SysV

The C convention as specified in the x86-64 supplement to the System V ABI, used on most non-Windows systems.

@ HiPE

Used by the High-Performance Erlang Compiler (HiPE).

@ Swift

Calling convention for Swift.

@ PreserveMost

Used for runtime calls that preserve most registers.

@ X86_INTR

x86 hardware interrupt context.

@ GHC

Used by the Glasgow Haskell Compiler (GHC).

@ X86_ThisCall

Similar to X86_StdCall.

@ PreserveAll

Used for runtime calls that preserve (almost) all registers.

@ X86_StdCall

stdcall is mostly used by the Win32 API.

@ Fast

Attempts to make calls as fast as possible (e.g. by passing things in registers).

@ X86_VectorCall

MSVC calling convention that passes vectors and vector aggregates in SSE registers.

@ Intel_OCL_BI

Used for Intel OpenCL built-ins.

@ PreserveNone

Used for runtime calls that preserve no general registers.

@ Tail

Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always be performed.

@ Win64

The C convention as implemented on Windows/x86-64 and AArch64.

@ SwiftTail

This follows the Swift calling convention in how arguments are passed but guarantees tail calls will be made by making the callee clean up their stack.

@ X86_RegCall

Register calling convention used for parameters transfer optimization.

@ C

The default llvm calling convention, compatible with C.

@ X86_FastCall

'fast' analog of X86_StdCall.

NodeType

ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.

@ ADD

Simple integer binary arithmetic operators.

@ ANY_EXTEND

ANY_EXTEND - Used for integer types. The high bits are undefined.

@ CONCAT_VECTORS

CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length and element type, this produces a concatenated vector result value, with length equal to the sum of the lengths of the input vectors.

@ SIGN_EXTEND

Conversion operators.

@ SCALAR_TO_VECTOR

SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the resultant vector type.

@ CopyFromReg

CopyFromReg - This node indicates that the input value is a virtual or physical register that is defined outside of the scope of this SelectionDAG.

@ EXTRACT_VECTOR_ELT

EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially variable) element number IDX.

@ CopyToReg

CopyToReg - This node has three operands: a chain, a register number to set to this value, and a value.

@ ZERO_EXTEND

ZERO_EXTEND - Used for integer types, zeroing the new bits.

@ TokenFactor

TokenFactor - This node takes multiple tokens as input and produces a single token result.

@ FP_ROUND

X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the destination VT.

@ TRUNCATE

TRUNCATE - Completely drop the high bits.

@ AssertSext

AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero or sign extended from a narrower type.

LLVM_ABI LegalityPredicate isVector(unsigned TypeIdx)

True iff the specified type index is a vector.

@ RET_GLUE

Return with a glue operand.

@ IRET

Return from interrupt. Operand 0 is the number of bytes to pop.

@ CALL

These operations represent an abstract X86 call instruction, which includes a bunch of information.

@ GlobalBaseReg

On Darwin, this node represents the result of the popl at function entry, used for PIC code.

@ TC_RETURN

Tail call return.

@ NT_CALL

Same as call except it adds the NoTrack prefix.

@ MOVDQ2Q

Copies a 64-bit value from the low word of an XMM vector to an MMX vector.

@ POP_FROM_X87_REG

The same as ISD::CopyFromReg except that this node makes it explicit that it may lower to an x87 FPU stack pop.

bool isExtendedSwiftAsyncFrameSupported(const X86Subtarget &Subtarget, const MachineFunction &MF)

True if the target supports the extended frame for async Swift functions.

bool isCalleePop(CallingConv::ID CallingConv, bool is64Bit, bool IsVarArg, bool GuaranteeTCO)

Determines whether the callee is required to pop its own arguments.

Definition X86ISelLoweringCall.cpp:3009
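
A usage sketch against this signature (the results noted reflect the stdcall-vs-C behavior implemented in this file):

#include "llvm/IR/CallingConv.h"
// X86::isCalleePop is declared in the X86 backend's X86ISelLowering.h.

// Sketch: 32-bit stdcall callees pop their own arguments; plain C
// callees do not (varargs and GuaranteeTCO both off here).
bool StdcallPops = llvm::X86::isCalleePop(
    llvm::CallingConv::X86_StdCall, /*is64Bit=*/false,
    /*IsVarArg=*/false, /*GuaranteeTCO=*/false); // true
bool CDeclPops = llvm::X86::isCalleePop(
    llvm::CallingConv::C, /*is64Bit=*/false,
    /*IsVarArg=*/false, /*GuaranteeTCO=*/false); // false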

std::optional< Function * > getAttachedARCFunction(const CallBase *CB)

This function returns operand bundle clang_arc_attachedcall's argument, which is the address of the A...

bool hasAttachedCallOpBundle(const CallBase *CB)

This is an optimization pass for GlobalISel generic memory operations.

decltype(auto) dyn_cast(const From &Val)

dyn_cast - Return the argument parameter cast to the specified type.

void append_range(Container &C, Range &&R)

Wrapper function to append range R to container C.

constexpr bool isPowerOf2_32(uint32_t Value)

Return true if the argument is a power of two > 0.

LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)

bool is_sorted(R &&Range, Compare C)

Wrapper function around std::is_sorted to check if elements in a range R are sorted with respect to a comparator C.

LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)

See if the given exception handling personality function is one that we understand.

class LLVM_GSL_OWNER SmallVector

Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).

bool isa(const From &Val)

isa - Return true if the parameter to the template is an instance of one of the template type arguments.

bool isFuncletEHPersonality(EHPersonality Pers)

Returns true if this is a personality function that invokes handler funclets (which must return to it...

uint16_t MCPhysReg

An unsigned integer type large enough to represent all physical registers, but not necessarily virtual registers.

uint64_t alignTo(uint64_t Size, Align A)

Returns a multiple of A needed to store Size bytes.
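
A couple of concrete values for this helper and isPowerOf2_32 above:

#include "llvm/Support/Alignment.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

// isPowerOf2_32 is constexpr, so it works in static contexts.
static_assert(isPowerOf2_32(64), "exactly one bit set");
// alignTo rounds a byte count up to the next multiple of the alignment:
//   alignTo(10, Align(8)) == 16
//   alignTo(16, Align(8)) == 16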

DWARFExpression::Operation Op

ArrayRef(const T &OneElt) -> ArrayRef< T >

bool CC_X86(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)

bool RetCC_X86(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)

decltype(auto) cast(const From &Val)

cast - Return the argument parameter cast to the specified type.
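
A sketch of the isa/cast/dyn_cast idiom these entries describe (helper name invented):

#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// Sketch: dyn_cast returns nullptr on a type mismatch, so it both tests
// and casts; cast<> would assert instead of returning nullptr.
static bool isVolatileLoad(Value *V) {
  if (auto *LI = dyn_cast<LoadInst>(V))
    return LI->isVolatile();
  return false;
}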

This struct is a compact representation of a valid (non-zero power of two) alignment.

constexpr uint64_t value() const

This is a hole in the type system and should not be abused.

static constexpr Align Constant()

Allow constructions of constexpr Align.

EVT changeVectorElementTypeToInteger() const

Return a vector with the same number of elements as this vector, but with the element type converted to an integer type with the same bitwidth.

TypeSize getStoreSize() const

Return the number of bytes overwritten by a store of the specified value type.

static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)

Returns the EVT that represents a vector NumElements in length, where each element is of type VT.

bool bitsLT(EVT VT) const

Return true if this has less bits than VT.

ElementCount getVectorElementCount() const

TypeSize getSizeInBits() const

Return the size of the specified value type in bits.

MVT getSimpleVT() const

Return the SimpleValueType held in the specified simple EVT.

bool is128BitVector() const

Return true if this is a 128-bit vector type.

bool is512BitVector() const

Return true if this is a 512-bit vector type.

bool isVector() const

Return true if this is a vector value type.

bool is256BitVector() const

Return true if this is a 256-bit vector type.

EVT getVectorElementType() const

Given a vector type, return the type of each element.

EVT changeVectorElementType(EVT EltVT) const

Return a VT for a vector type whose attributes match ourselves with the exception of the element type that is chosen by the caller.

unsigned getVectorNumElements() const

Given a vector type, return the number of elements it contains.

Describes a register that needs to be forwarded from the prologue to a musttail call.

SmallVector< ArgRegPair, 1 > ArgRegPairs

Vector of call argument and its forwarding register.

This class contains a discriminated union of information about pointers in memory operands, relating them back to the original IR.

static LLVM_ABI MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)

Stack pointer relative access.

static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)

Return a MachinePointerInfo record that refers to the specified FrameIndex.
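
A sketch tying getFixedStack to a frame-index store (helper name invented; FI is an existing frame slot):

#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Sketch: spill Val to slot FI, tagging the memory operand as
// frame-index relative so alias analysis can reason about it.
static SDValue spillToSlot(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain,
                           SDValue Val, int FI, EVT PtrVT) {
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
  return DAG.getStore(Chain, dl, Val, FIN,
                      MachinePointerInfo::getFixedStack(MF, FI),
                      MF.getFrameInfo().getObjectAlign(FI));
}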

This represents a list of ValueType's that has been intern'd by a SelectionDAG.

This structure contains all information that is necessary for lowering calls.

SmallVector< ISD::InputArg, 32 > Ins

const ConstantInt * CFIType

SmallVector< ISD::OutputArg, 32 > Outs

SmallVector< SDValue, 32 > OutVals

Type * RetTy

Same as OrigRetTy, or partially legalized for soft float libcalls.