LLVM: lib/Target/X86/X86RegisterInfo.cpp Source File

1//===-- X86RegisterInfo.cpp - X86 Register Information -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the X86 implementation of the TargetRegisterInfo class.
10// This file is responsible for the frame pointer elimination optimization
11// on X86.
12//
13//===----------------------------------------------------------------------===//
14

37

38using namespace llvm;

39

40#define GET_REGINFO_TARGET_DESC

41#include "X86GenRegisterInfo.inc"

42

43static cl::opt<bool>
44 EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
45 cl::desc("Enable use of a base pointer for complex stack frames"));

46

47static cl::opt<bool>
48 DisableRegAllocNDDHints("x86-disable-regalloc-hints-for-ndd", cl::Hidden,
49 cl::init(false),
50 cl::desc("Disable two address hints for register "
51 "allocation"));

52

54
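// X86RegisterInfo constructor: the generated register info is seeded with
// RIP or EIP as the program counter depending on the triple; the body below
// then picks SlotSize and the stack, frame and base pointer registers.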

59 (TT.isX86_64() ? X86::RIP : X86::EIP)) {

61

62

63 Is64Bit = TT.isX86_64();

64 IsTarget64BitLP64 = Is64Bit && !TT.isX32();

65 IsWin64 = Is64Bit && TT.isOSWindows();

66 IsUEFI64 = Is64Bit && TT.isUEFI();

67

68

69

70

71 if (Is64Bit) {

72 SlotSize = 8;

73

74

75

76 bool Use64BitReg = !TT.isX32();

77 StackPtr = Use64BitReg ? X86::RSP : X86::ESP;

78 FramePtr = Use64BitReg ? X86::RBP : X86::EBP;

79 BasePtr = Use64BitReg ? X86::RBX : X86::EBX;

80 } else {

81 SlotSize = 4;

82 StackPtr = X86::ESP;

83 FramePtr = X86::EBP;

84 BasePtr = X86::ESI;

85 }

86}
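// getSubClassWithSubReg: in 32-bit mode the sub_8bit index is only available
// on the ABCD registers, so it is treated like sub_8bit_hi when searching for
// a subclass that supports the sub-register.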

87

90 unsigned Idx) const {

91

92

93 if (!Is64Bit && Idx == X86::sub_8bit)

94 Idx = X86::sub_8bit_hi;

95

96

97 return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);

98}
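// getMatchingSuperRegClass: applies the same 32-bit sub_8bit constraint to
// class A before deferring to the generated implementation.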

99

103 unsigned SubIdx) const {

104

105 if (!Is64Bit && SubIdx == X86::sub_8bit) {

106 A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);

107 if (!A)

108 return nullptr;

109 }

110 return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);

111}
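// getLargestLegalSuperClass: walks the super-classes of RC and returns the
// largest same-sized class the allocator may inflate to, choosing the
// EVEX-capable FR/VR *X variants only when AVX-512/VLX is available.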

112

116

117

118

119

120

121

122

123

124 if (RC == &X86::GR8_NOREXRegClass)

125 return RC;

126

127

128

130 return RC;

131

133

137 do {

138 switch (Super->getID()) {

139 case X86::FR32RegClassID:

140 case X86::FR64RegClassID:

141

143 getRegSizeInBits(*Super) == getRegSizeInBits(*RC))

144 return Super;

145 break;

146 case X86::VR128RegClassID:

147 case X86::VR256RegClassID:

148

149 if (!Subtarget.hasVLX() &&

150 getRegSizeInBits(*Super) == getRegSizeInBits(*RC))

151 return Super;

152 break;

153 case X86::VR128XRegClassID:

154 case X86::VR256XRegClassID:

155

156 if (Subtarget.hasVLX() &&

157 getRegSizeInBits(*Super) == getRegSizeInBits(*RC))

158 return Super;

159 break;

160 case X86::FR32XRegClassID:

161 case X86::FR64XRegClassID:

162

164 getRegSizeInBits(*Super) == getRegSizeInBits(*RC))

165 return Super;

166 break;

167 case X86::GR8RegClassID:

168 case X86::GR16RegClassID:

169 case X86::GR32RegClassID:

170 case X86::GR64RegClassID:

171 case X86::GR8_NOREX2RegClassID:

172 case X86::GR16_NOREX2RegClassID:

173 case X86::GR32_NOREX2RegClassID:

174 case X86::GR64_NOREX2RegClassID:

175 case X86::RFP32RegClassID:

176 case X86::RFP64RegClassID:

177 case X86::RFP80RegClassID:

178 case X86::VR512_0_15RegClassID:

179 case X86::VR512RegClassID:

180

181

182 if (getRegSizeInBits(*Super) == getRegSizeInBits(*RC))

183 return Super;

184 }

185 if (I != E) {

187 ++I;

188 } else {

189 Super = nullptr;

190 }

191 } while (Super);

192 return RC;

193}
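// getPointerRegClass: GR64 for LP64, a low-4GiB address class for the x32
// ABI, and GR32 for 32-bit targets.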

194

197 assert(Kind == 0 && "this should only be used for default cases");

198 if (IsTarget64BitLP64)

199 return &X86::GR64RegClass;

200

201

202

203

204 return Is64Bit ? &X86::LOW32_ADDR_ACCESSRegClass : &X86::GR32RegClass;

205}
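// getCrossCopyRegClass: EFLAGS (the CCR class) cannot be copied directly, so
// cross-copies go through a general-purpose register.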

206

209 if (RC == &X86::CCRRegClass) {

210 if (Is64Bit)

211 return &X86::GR64RegClass;

212 else

213 return &X86::GR32RegClass;

214 }

215 return RC;

216}
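// getRegPressureLimit: rough per-class register pressure limits used by the
// scheduler; one register fewer when a frame pointer is reserved.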

217

218unsigned

222

223 unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;

224 switch (RC->getID()) {

225 default:

226 return 0;

227 case X86::GR32RegClassID:

228 return 4 - FPDiff;

229 case X86::GR64RegClassID:

230 return 12 - FPDiff;

231 case X86::VR128RegClassID:

232 return Is64Bit ? 10 : 4;

233 case X86::VR64RegClassID:

234 return 4;

235 }

236}
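// getCalleeSavedRegs: picks the callee-saved register list from the calling
// convention and the available vector features (SSE/AVX/AVX-512), with
// special cases for Win64/UEFI, Darwin TLS, RegCall, CFGuard check, Swift
// error/tail and the all-register conventions.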

237

240 assert(MF && "MachineFunction required");

241

244 bool HasSSE = Subtarget.hasSSE1();

245 bool HasAVX = Subtarget.hasAVX();

246 bool HasAVX512 = Subtarget.hasAVX512();

248

250

251

252

255

256

257

259 return CSR_NoRegs_SaveList;

260

261 switch (CC) {

264 return CSR_NoRegs_SaveList;

266 if (HasAVX)

267 return CSR_64_AllRegs_AVX_SaveList;

268 return CSR_64_AllRegs_SaveList;

270 return IsWin64 ? CSR_Win64_RT_MostRegs_SaveList

271 : CSR_64_RT_MostRegs_SaveList;

273 if (HasAVX)

274 return CSR_64_RT_AllRegs_AVX_SaveList;

275 return CSR_64_RT_AllRegs_SaveList;

277 return CSR_64_NoneRegs_SaveList;

279 if (Is64Bit)

281 CSR_64_CXX_TLS_Darwin_PE_SaveList : CSR_64_TLS_Darwin_SaveList;

282 break;

284 if (HasAVX512 && IsWin64)

285 return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;

286 if (HasAVX512 && Is64Bit)

287 return CSR_64_Intel_OCL_BI_AVX512_SaveList;

288 if (HasAVX && IsWin64)

289 return CSR_Win64_Intel_OCL_BI_AVX_SaveList;

290 if (HasAVX && Is64Bit)

291 return CSR_64_Intel_OCL_BI_AVX_SaveList;

292 if (!HasAVX && !IsWin64 && Is64Bit)

293 return CSR_64_Intel_OCL_BI_SaveList;

294 break;

295 }

297 if (Is64Bit) {

298 if (IsWin64) {

299 return (HasSSE ? CSR_Win64_RegCall_SaveList :

300 CSR_Win64_RegCall_NoSSE_SaveList);

301 } else {

302 return (HasSSE ? CSR_SysV64_RegCall_SaveList :

303 CSR_SysV64_RegCall_NoSSE_SaveList);

304 }

305 } else {

306 return (HasSSE ? CSR_32_RegCall_SaveList :

307 CSR_32_RegCall_NoSSE_SaveList);

308 }

310 assert(!Is64Bit && "CFGuard check mechanism only used on 32-bit X86");

311 return (HasSSE ? CSR_Win32_CFGuard_Check_SaveList

312 : CSR_Win32_CFGuard_Check_NoSSE_SaveList);

314 if (Is64Bit)

315 return CSR_64_MostRegs_SaveList;

316 break;

318 if (!HasSSE)

319 return CSR_Win64_NoSSE_SaveList;

320 return CSR_Win64_SaveList;

322 if (!Is64Bit)

323 return CSR_32_SaveList;

324 return IsWin64 ? CSR_Win64_SwiftTail_SaveList : CSR_64_SwiftTail_SaveList;

326 if (CallsEHReturn)

327 return CSR_64EHRet_SaveList;

328 return CSR_64_SaveList;

330 if (Is64Bit) {

331 if (HasAVX512)

332 return CSR_64_AllRegs_AVX512_SaveList;

333 if (HasAVX)

334 return CSR_64_AllRegs_AVX_SaveList;

335 if (HasSSE)

336 return CSR_64_AllRegs_SaveList;

337 return CSR_64_AllRegs_NoSSE_SaveList;

338 } else {

339 if (HasAVX512)

340 return CSR_32_AllRegs_AVX512_SaveList;

341 if (HasAVX)

342 return CSR_32_AllRegs_AVX_SaveList;

343 if (HasSSE)

344 return CSR_32_AllRegs_SSE_SaveList;

345 return CSR_32_AllRegs_SaveList;

346 }

347 default:

348 break;

349 }

350

351 if (Is64Bit) {

353 F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);

354 if (IsSwiftCC)

355 return IsWin64 ? CSR_Win64_SwiftError_SaveList

356 : CSR_64_SwiftError_SaveList;

357

358 if (IsWin64 || IsUEFI64)

359 return HasSSE ? CSR_Win64_SaveList : CSR_Win64_NoSSE_SaveList;

360 if (CallsEHReturn)

361 return CSR_64EHRet_SaveList;

362 return CSR_64_SaveList;

363 }

364

365 return CallsEHReturn ? CSR_32EHRet_SaveList : CSR_32_SaveList;

366}

367

370 return Is64Bit ? CSR_IPRA_64_SaveList : CSR_IPRA_32_SaveList;

371}

372

375 assert(MF && "Invalid MachineFunction pointer.");

378 return CSR_64_CXX_TLS_Darwin_ViaCopy_SaveList;

379 return nullptr;

380}
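// getCallPreservedMask: the call-site counterpart of getCalleeSavedRegs,
// returning the register mask preserved by a call with the given convention.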

381

386 bool HasSSE = Subtarget.hasSSE1();

387 bool HasAVX = Subtarget.hasAVX();

388 bool HasAVX512 = Subtarget.hasAVX512();

389

390 switch (CC) {

393 return CSR_NoRegs_RegMask;

395 if (HasAVX)

396 return CSR_64_AllRegs_AVX_RegMask;

397 return CSR_64_AllRegs_RegMask;

399 return IsWin64 ? CSR_Win64_RT_MostRegs_RegMask : CSR_64_RT_MostRegs_RegMask;

401 if (HasAVX)

402 return CSR_64_RT_AllRegs_AVX_RegMask;

403 return CSR_64_RT_AllRegs_RegMask;

405 return CSR_64_NoneRegs_RegMask;

407 if (Is64Bit)

408 return CSR_64_TLS_Darwin_RegMask;

409 break;

411 if (HasAVX512 && IsWin64)

412 return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;

413 if (HasAVX512 && Is64Bit)

414 return CSR_64_Intel_OCL_BI_AVX512_RegMask;

415 if (HasAVX && IsWin64)

416 return CSR_Win64_Intel_OCL_BI_AVX_RegMask;

417 if (HasAVX && Is64Bit)

418 return CSR_64_Intel_OCL_BI_AVX_RegMask;

419 if (!HasAVX && !IsWin64 && Is64Bit)

420 return CSR_64_Intel_OCL_BI_RegMask;

421 break;

422 }

424 if (Is64Bit) {

425 if (IsWin64) {

426 return (HasSSE ? CSR_Win64_RegCall_RegMask :

427 CSR_Win64_RegCall_NoSSE_RegMask);

428 } else {

429 return (HasSSE ? CSR_SysV64_RegCall_RegMask :

430 CSR_SysV64_RegCall_NoSSE_RegMask);

431 }

432 } else {

433 return (HasSSE ? CSR_32_RegCall_RegMask :

434 CSR_32_RegCall_NoSSE_RegMask);

435 }

437 assert(!Is64Bit && "CFGuard check mechanism only used on 32-bit X86");

438 return (HasSSE ? CSR_Win32_CFGuard_Check_RegMask

439 : CSR_Win32_CFGuard_Check_NoSSE_RegMask);

441 if (Is64Bit)

442 return CSR_64_MostRegs_RegMask;

443 break;

445 return CSR_Win64_RegMask;

447 if (!Is64Bit)

448 return CSR_32_RegMask;

449 return IsWin64 ? CSR_Win64_SwiftTail_RegMask : CSR_64_SwiftTail_RegMask;

451 return CSR_64_RegMask;

453 if (Is64Bit) {

454 if (HasAVX512)

455 return CSR_64_AllRegs_AVX512_RegMask;

456 if (HasAVX)

457 return CSR_64_AllRegs_AVX_RegMask;

458 if (HasSSE)

459 return CSR_64_AllRegs_RegMask;

460 return CSR_64_AllRegs_NoSSE_RegMask;

461 } else {

462 if (HasAVX512)

463 return CSR_32_AllRegs_AVX512_RegMask;

464 if (HasAVX)

465 return CSR_32_AllRegs_AVX_RegMask;

466 if (HasSSE)

467 return CSR_32_AllRegs_SSE_RegMask;

468 return CSR_32_AllRegs_RegMask;

469 }

470 default:

471 break;

472 }

473

474

475

476 if (Is64Bit) {

479 F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);

480 if (IsSwiftCC)

481 return IsWin64 ? CSR_Win64_SwiftError_RegMask : CSR_64_SwiftError_RegMask;

482

483 return (IsWin64 || IsUEFI64) ? CSR_Win64_RegMask : CSR_64_RegMask;

484 }

485

486 return CSR_32_RegMask;

487}

488

491 return CSR_NoRegs_RegMask;

492}

493

495 return CSR_64_TLS_Darwin_RegMask;

496}
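// getReservedRegs: marks registers the allocator must never touch, including
// the stack and instruction pointers, the frame and base pointers when they
// are needed, the x87 stack, and registers the current mode cannot encode
// (e.g. R16-R31 when EGPR is unavailable).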

497

501

502

504

505

507

508

510

511

512 for (const MCPhysReg &SubReg : subregs_inclusive(X86::RSP))

514

515

517

518

519 for (const MCPhysReg &SubReg : subregs_inclusive(X86::RIP))

521

522

527 "Frame pointer clobbered by function invoke is not supported.");

528

529 for (const MCPhysReg &SubReg : subregs_inclusive(X86::RBP))

531 }

532

533

537 "Stack realignment in presence of dynamic "

538 "allocas is not supported with "

539 "this calling convention.");

540

542 for (const MCPhysReg &SubReg : subregs_inclusive(BasePtr))

544 }

545

546

553

554

555 for (unsigned n = 0; n != 8; ++n)

557

558

559 if (!Is64Bit) {

560

561

570

571 for (unsigned n = 0; n != 8; ++n) {

572

575

576

579 }

580 }

582 for (unsigned n = 0; n != 16; ++n) {

584 ++AI)

586 }

587 }

588

589

591 Reserved.set(X86::R16, X86::R31WH + 1);

592

598 }

599

601 {X86::SIL, X86::DIL, X86::BPL, X86::SPL,

602 X86::SIH, X86::DIH, X86::BPH, X86::SPH}));

604}
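// getNumSupportedRegs: how many target registers this function may use,
// based on EGPR, AMX, AVX-512 and AVX availability; the static_assert below
// keeps the cut-off points in sync with the generated register numbering.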

605

607

608

609

610

611

612

613

614

615

616

617

618 static_assert((X86::R15WH + 1 == X86::YMM0) && (X86::YMM15 + 1 == X86::K0) &&

619 (X86::K6_K7 + 1 == X86::TMMCFG) &&

620 (X86::TMM7 + 1 == X86::R16) &&

621 (X86::R31WH + 1 == X86::NUM_TARGET_REGS),

622 "Register number may be incorrect");

623

625 if (ST.hasEGPR())

626 return X86::NUM_TARGET_REGS;

627 if (ST.hasAMXTILE())

628 return X86::TMM7 + 1;

629 if (ST.hasAVX512())

630 return X86::K6_K7 + 1;

631 if (ST.hasAVX())

632 return X86::YMM15 + 1;

633 return X86::R15WH + 1;

634}
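// isArgumentRegister: conservatively reports registers that may carry call
// arguments under some X86 calling convention (GPR argument registers, MMX
// on 32-bit targets, and XMM0-XMM7 when SSE is available).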

635

641 return TRI.isSuperOrSubRegisterEq(RegA, RegB);

642 };

643

644 if (!ST.is64Bit())

647 [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }) ||

648 (ST.hasMMX() && X86::VR64RegClass.contains(Reg));

649

651

653 return true;

654

657 [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }))

658 return true;

659

662 [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }))

663 return true;

664

665 if (ST.hasSSE1() &&

667 X86::XMM3, X86::XMM4, X86::XMM5,

668 X86::XMM6, X86::XMM7},

669 [&](MCRegister &RegA) { return IsSubReg(RegA, Reg); }))

670 return true;

671

672 return X86GenRegisterInfo::isArgumentRegister(MF, Reg);

673}

674

679

680

681 if (TRI.isSuperOrSubRegisterEq(X86::RSP, PhysReg))

682 return true;

683

684

686 if (TFI.hasFP(MF) && TRI.isSuperOrSubRegisterEq(X86::RBP, PhysReg))

687 return true;

688

689 return X86GenRegisterInfo::isFixedRegister(MF, PhysReg);

690}

691

693 return RC->getID() == X86::TILERegClassID;

694}

695

697

698

699

700

701

702

703

704 assert(!(Mask[X86::EFLAGS / 32] & (1U << (X86::EFLAGS % 32))) &&

705 "EFLAGS are not live-out from a patchpoint.");

706

707

708 for (auto Reg : {X86::EFLAGS, X86::RIP, X86::EIP, X86::IP})

709 Mask[Reg / 32] &= ~(1U << (Reg % 32));

710}
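// hasBasePointer: a separate base pointer register is required when stack
// realignment rules out frame-pointer addressing and dynamic stack
// adjustments rule out stack-pointer addressing of locals.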

711

712

713

714

715

719

722

723

725 return false;

726

728 return true;

729

731

733 return false;

734

735

736

737

738

739

740 bool CantUseFP = hasStackRealignment(MF);

741 return CantUseFP && CantUseSP(MFI);

742}
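// canRealignStack: realignment requires that the frame pointer can still be
// reserved and, if the stack pointer cannot address locals, that the base
// pointer can be reserved as well.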

743

746 return false;

747

750

751

752

753 if (!MRI->canReserveReg(FramePtr))

754 return false;

755

756

757

759 return MRI->canReserveReg(BasePtr);

760 return true;

761}

762

765 return true;

766

768}
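// tryOptimizeLEAtoMOV: an LEA produced by frame index elimination with scale
// 1, no index register and zero displacement is just a register copy, so it
// is rewritten into one and the LEA erased.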

769

770

771

772

773

776 unsigned Opc = II->getOpcode();

777

778 if ((Opc != X86::LEA32r && Opc != X86::LEA64r && Opc != X86::LEA64_32r) ||

779 MI.getOperand(2).getImm() != 1 ||

780 MI.getOperand(3).getReg() != X86::NoRegister ||

781 MI.getOperand(4).getImm() != 0 ||

782 MI.getOperand(5).getReg() != X86::NoRegister)

783 return false;

784 Register BasePtr = MI.getOperand(1).getReg();

785

786

787

788 if (Opc == X86::LEA64_32r)

790 Register NewDestReg = MI.getOperand(0).getReg();

792 MI.getParent()->getParent()->getSubtarget<X86Subtarget>().getInstrInfo();

793 TII->copyPhysReg(*MI.getParent(), II, MI.getDebugLoc(), NewDestReg, BasePtr,

794 MI.getOperand(1).isKill());

795 MI.eraseFromParent();

796 return true;

797}

798

800 switch (MI.getOpcode()) {

801 case X86::CATCHRET:

802 case X86::CLEANUPRET:

803 return true;

804 default:

805 return false;

806 }

808}
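// eliminateFrameIndex (basic form): rewrites the frame-index operand to
// BaseReg and folds FIOffset into the displacement, with LOCAL_ESCAPE and
// STACKMAP/PATCHPOINT handled specially.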

809

811 unsigned FIOperandNum,

813 int FIOffset) const {

815 unsigned Opc = MI.getOpcode();

816 if (Opc == TargetOpcode::LOCAL_ESCAPE) {

819 return;

820 }

821

822 MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);

823

824

825

826 if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {

827 assert(BasePtr == FramePtr && "Expected the FP as base register");

828 int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;

829 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);

830 return;

831 }

832

833 if (MI.getOperand(FIOperandNum + 3).isImm()) {

834

835 int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());

836 int Offset = FIOffset + Imm;

837 assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&

838 "Requesting 64-bit offset in 32-bit immediate!");

840 MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);

841 } else {

842

844 FIOffset + (uint64_t)MI.getOperand(FIOperandNum + 3).getOffset();

845 MI.getOperand(FIOperandNum + 3).setOffset(Offset);

846 }

847}
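// eliminateFrameIndex (main override): computes the frame offset via the
// frame lowering (SP-relative for returns and for 64-bit EH funclets), adds
// SPAdj when based off the stack pointer, and scavenges a 64-bit scratch
// register when the folded offset no longer fits a signed 32-bit immediate.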

848

849bool

851 int SPAdj, unsigned FIOperandNum,

857 bool IsEHFuncletEpilogue = MBBI == MBB.end() ? false

860 int FrameIndex = MI.getOperand(FIOperandNum).getIndex();

861

862

863 int64_t FIOffset;

865 if (MI.isReturn()) {

866 assert((!hasStackRealignment(MF) ||

868 "Return instruction can only reference SP relative frame objects");

869 FIOffset =

871 } else if (TFI->Is64Bit && (MBB.isEHFuncletEntry() || IsEHFuncletEpilogue)) {

873 } else {

875 }

876

877

878

879

880

881

882 unsigned Opc = MI.getOpcode();

883 if (Opc == TargetOpcode::LOCAL_ESCAPE) {

886 return false;

887 }

888

889

890

891

892

893 Register MachineBasePtr = BasePtr;

894 if (Opc == X86::LEA64_32r && X86::GR32RegClass.contains(BasePtr))

896

897

898

899 MI.getOperand(FIOperandNum).ChangeToRegister(MachineBasePtr, false);

900

901 if (BasePtr == StackPtr)

902 FIOffset += SPAdj;

903

904

905

906 if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {

907 assert(BasePtr == FramePtr && "Expected the FP as base register");

908 int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;

909 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);

910 return false;

911 }

912

913 if (MI.getOperand(FIOperandNum+3).isImm()) {

916 int64_t Imm = MI.getOperand(FIOperandNum + 3).getImm();

917 int64_t Offset = FIOffset + Imm;

919

920

921 if (Is64Bit && !FitsIn32Bits) {

922 assert(RS && "RegisterScavenger was NULL");

923

924 RS->enterBasicBlockEnd(MBB);

925 RS->backward(std::next(II));

926

927 Register ScratchReg = RS->scavengeRegisterBackwards(

928 X86::GR64RegClass, II, false, 0,

929 true);

930 assert(ScratchReg != 0 && "scratch reg was 0");

931 RS->setRegUsed(ScratchReg);

932

934

935 MI.getOperand(FIOperandNum + 3).setImm(0);

936 MI.getOperand(FIOperandNum + 2).setReg(ScratchReg);

937

938 return false;

939 }

940

941

942 if (!Is64Bit && !FitsIn32Bits) {

943 MI.emitGenericError("64-bit offset calculated but target is 32-bit");

944

946 return false;

947 }

948

950 MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);

951 } else {

952

954 (uint64_t)MI.getOperand(FIOperandNum+3).getOffset();

955 MI.getOperand(FIOperandNum + 3).setOffset(Offset);

956 }

957 return false;

958}
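// findDeadCallerSavedReg: walks liveness backwards from a return-like
// instruction and returns a GR32/GR64 register that is dead and unreserved
// at that point, or 0 if none is found.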

959

965 return 0;

966

968 return 0;

969

970 switch (MBBI->getOpcode()) {

971 default:

972 return 0;

973 case TargetOpcode::PATCHABLE_RET:

974 case X86::RET:

975 case X86::RET32:

976 case X86::RET64:

977 case X86::RETI32:

978 case X86::RETI64:

979 case X86::TCRETURNdi:

980 case X86::TCRETURNri:

981 case X86::TCRETURN_WIN64ri:

982 case X86::TCRETURN_HIPE32ri:

983 case X86::TCRETURNmi:

984 case X86::TCRETURNdi64:

985 case X86::TCRETURNri64:

986 case X86::TCRETURNri64_ImpCall:

987 case X86::TCRETURNmi64:

988 case X86::TCRETURN_WINmi64:

989 case X86::EH_RETURN:

990 case X86::EH_RETURN64: {

994

996 Is64Bit ? X86::GR64_NOSPRegClass : X86::GR32_NOSPRegClass;

998 if (LRU.available(Reg) && !MRI.isReserved(Reg))

999 return Reg;

1000 }

1001 }

1002 }

1003

1004 return 0;

1005}

1006

1009 return TFI->hasFP(MF) ? FramePtr : StackPtr;

1010}

1011

1018 return FrameReg;

1019}

1020

1027 return StackReg;

1028}
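// getTileShape: recovers the (row, column) shape of an AMX tile virtual
// register, either from the VirtRegMap or from the shape operands of its
// defining instruction (following COPYs to their source).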

1029

1033 return VRM->getShape(VirtReg);

1034

1037 unsigned OpCode = MI->getOpcode();

1038 switch (OpCode) {

1039 default:

1040 llvm_unreachable("Unexpected machine instruction on tile register!");

1041 break;

1042 case X86::COPY: {

1043 Register SrcReg = MI->getOperand(1).getReg();

1046 return Shape;

1047 }

1048

1049 case X86::PTILELOADDV:

1050 case X86::PTILELOADDT1V:

1051 case X86::PTDPBSSDV:

1052 case X86::PTDPBSUDV:

1053 case X86::PTDPBUSDV:

1054 case X86::PTDPBUUDV:

1055 case X86::PTILEZEROV:

1056 case X86::PTDPBF16PSV:

1057 case X86::PTDPFP16PSV:

1058 case X86::PTCMMIMFP16PSV:

1059 case X86::PTCMMRLFP16PSV:

1060 case X86::PTILELOADDRSV:

1061 case X86::PTILELOADDRST1V:

1062 case X86::PTMMULTF32PSV:

1063 case X86::PTDPBF8PSV:

1064 case X86::PTDPBHF8PSV:

1065 case X86::PTDPHBF8PSV:

1066 case X86::PTDPHF8PSV: {

1071 return Shape;

1072 }

1073 }

1074}
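// getRegAllocationHints: layers target-specific hints on top of the generic
// copy hints: two-address style hints for APX NDD instructions, and, for AMX
// tile registers, physical tiles whose assigned shape matches the virtual
// register's shape.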

1075

1085 VirtReg, Order, Hints, MF, VRM, Matrix);

1088

1089 unsigned ID = RC.getID();

1090

1091 if (!VRM)

1092 return BaseImplRetVal;

1093

1094 if (ID != X86::TILERegClassID) {

1096 !TRI.isGeneralPurposeRegisterClass(&RC))

1097 return BaseImplRetVal;

1098

1099

1101

1102 auto TryAddNDDHint = [&](const MachineOperand &MO) {

1105 if (PhysReg && !MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))

1106 TwoAddrHints.insert(PhysReg);

1107 };

1108

1109

1110

1111 for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {

1114 continue;

1115 unsigned OpIdx = MI.getOperandNo(&MO);

1116 if (OpIdx == 0) {

1117 assert(MI.getOperand(1).isReg());

1118 TryAddNDDHint(MI.getOperand(1));

1119 if (MI.isCommutable()) {

1120 assert(MI.getOperand(2).isReg());

1121 TryAddNDDHint(MI.getOperand(2));

1122 }

1123 } else if (OpIdx == 1) {

1124 TryAddNDDHint(MI.getOperand(0));

1125 } else if (MI.isCommutable() && OpIdx == 2) {

1126 TryAddNDDHint(MI.getOperand(0));

1127 }

1128 }

1129

1130 for (MCPhysReg OrderReg : Order)

1131 if (TwoAddrHints.count(OrderReg))

1133

1134 return BaseImplRetVal;

1135 }

1136

1138 auto AddHint = [&](MCPhysReg PhysReg) {

1142 return;

1143 }

1145 if (PhysShape == VirtShape)

1147 };

1148

1151 for (auto Hint : CopyHints) {

1152 if (RC.contains(Hint) && !MRI->isReserved(Hint))

1153 AddHint(Hint);

1154 }

1155 for (MCPhysReg PhysReg : Order) {

1156 if (!CopyHints.count(PhysReg) && RC.contains(PhysReg) &&

1157 !MRI->isReserved(PhysReg))

1158 AddHint(PhysReg);

1159 }

1160

1161#define DEBUG_TYPE "tile-hint"

1163 dbgs() << "Hints for virtual register " << format_hex(VirtReg, 8) << "\n";

1164 for (auto Hint : Hints) {

1165 dbgs() << "tmm" << Hint << ",";

1166 }

1167 dbgs() << "\n";

1168 });

1169#undef DEBUG_TYPE

1170

1171 return true;

1172}
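// constrainRegClassToNonRex2: maps GR classes to their _NOREX2 variants so
// instructions that cannot encode the extended R16-R31 registers get a
// suitably restricted class; isNonRex2RegClass below identifies such classes.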

1173

1176 switch (RC->getID()) {

1177 default:

1178 return RC;

1179 case X86::GR8RegClassID:

1180 return &X86::GR8_NOREX2RegClass;

1181 case X86::GR16RegClassID:

1182 return &X86::GR16_NOREX2RegClass;

1183 case X86::GR32RegClassID:

1184 return &X86::GR32_NOREX2RegClass;

1185 case X86::GR64RegClassID:

1186 return &X86::GR64_NOREX2RegClass;

1187 case X86::GR32_NOSPRegClassID:

1188 return &X86::GR32_NOREX2_NOSPRegClass;

1189 case X86::GR64_NOSPRegClassID:

1190 return &X86::GR64_NOREX2_NOSPRegClass;

1191 }

1192}

1193

1195 switch (RC->getID()) {

1196 default:

1197 return false;

1198 case X86::GR8_NOREX2RegClassID:

1199 case X86::GR16_NOREX2RegClassID:

1200 case X86::GR32_NOREX2RegClassID:

1201 case X86::GR64_NOREX2RegClassID:

1202 case X86::GR32_NOREX2_NOSPRegClassID:

1203 case X86::GR64_NOREX2_NOSPRegClassID:

1204 case X86::GR64_with_sub_16bit_in_GR16_NOREX2RegClassID:

1205 return true;

1206 }

1207}
