LLVM: lib/Target/X86/X86ExpandPseudo.cpp Source File (original) (raw)

1

2

3

4

5

6

7

8

9

10

11

12

13

14

27using namespace llvm;

28

29#define DEBUG_TYPE "x86-pseudo"

30#define X86_EXPAND_PSEUDO_NAME "X86 pseudo instruction expansion pass"

31

32namespace {

34public:

35 static char ID;

37

38 void getAnalysisUsage(AnalysisUsage &AU) const override {

43 }

44

50

52

55 }

56

57 StringRef getPassName() const override {

58 return "X86 pseudo instruction expansion pass";

59 }

60

61private:

68

69

70

71

72 bool expandPseudosWhichAffectControlFlow(MachineFunction &MF);

73

74

75

76

77 void expandVastartSaveXmmRegs(

80};

81char X86ExpandPseudo::ID = 0;

82

83}

84

86 false)

87

88void X86ExpandPseudo::expandICallBranchFunnel(

95 ++InsPt;

96

97 std::vector<std::pair<MachineBasicBlock *, unsigned>> TargetMBBs;

101

102 auto CmpTarget = [&](unsigned Target) {

103 if (Selector.isReg())

104 MBB->addLiveIn(Selector.getReg());

113 .add(Selector)

115 };

116

117 auto CreateMBB = [&]() {

119 MBB->addSuccessor(NewMBB);

120 if (MBB->isLiveIn(X86::EFLAGS))

121 MBB->addLiveIn(X86::EFLAGS);

122 return NewMBB;

123 };

124

125 auto EmitCondJump = [&](unsigned CC, MachineBasicBlock *ThenMBB) {

127

128 auto *ElseMBB = CreateMBB();

129 MF->insert(InsPt, ElseMBB);

130 MBB = ElseMBB;

132 };

133

134 auto EmitCondJumpTarget = [&](unsigned CC, unsigned Target) {

135 auto *ThenMBB = CreateMBB();

136 TargetMBBs.push_back({ThenMBB, Target});

137 EmitCondJump(CC, ThenMBB);

138 };

139

140 auto EmitTailCall = [&](unsigned Target) {

143 };

144

145 std::function<void(unsigned, unsigned)> EmitBranchFunnel =

146 [&](unsigned FirstTarget, unsigned NumTargets) {

147 if (NumTargets == 1) {

149 return;

150 }

151

152 if (NumTargets == 2) {

156 return;

157 }

158

159 if (NumTargets < 6) {

163 EmitBranchFunnel(FirstTarget + 2, NumTargets - 2);

164 return;

165 }

166

167 auto *ThenMBB = CreateMBB();

168 CmpTarget(FirstTarget + (NumTargets / 2));

171 EmitBranchFunnel(FirstTarget + (NumTargets / 2) + 1,

172 NumTargets - (NumTargets / 2) - 1);

173

174 MF->insert(InsPt, ThenMBB);

175 MBB = ThenMBB;

177 EmitBranchFunnel(FirstTarget, NumTargets / 2);

178 };

179

180 EmitBranchFunnel(0, (JTInst->getNumOperands() - 2) / 2);

181 for (auto P : TargetMBBs) {

182 MF->insert(InsPt, P.first);

185 }

186 JTMBB->erase(JTInst);

187}

188

191

192

193 MachineInstr &MI = *MBBI;

194

195 MachineInstr *OriginalCall;

196 assert((MI.getOperand(1).isGlobal() || MI.getOperand(1).isReg()) &&

197 "invalid operand for regular call");

198 unsigned Opc = -1;

199 if (MI.getOpcode() == X86::CALL64m_RVMARKER)

200 Opc = X86::CALL64m;

201 else if (MI.getOpcode() == X86::CALL64r_RVMARKER)

202 Opc = X86::CALL64r;

203 else if (MI.getOpcode() == X86::CALL64pcrel32_RVMARKER)

204 Opc = X86::CALL64pcrel32;

205 else

207

209 bool RAXImplicitDead = false;

211

212

213 if (Op.isReg() && Op.isImplicit() && Op.isDead() &&

214 TRI->regsOverlap(Op.getReg(), X86::RAX)) {

215 Op.setIsDead(false);

216 Op.setIsDef(true);

217 RAXImplicitDead = true;

218 }

220 }

221

222

223

224

225

226

228 auto *Marker = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(X86::MOV64rr))

232 if (MI.shouldUpdateAdditionalCallInfo())

234

235

236 const uint32_t *RegMask =

237 TRI->getCallPreservedMask(*MBB.getParent(), CallingConv::C);

238 MachineInstr *RtCall =

247 MI.eraseFromParent();

248

250

251

252 if (TM.getTargetTriple().isOSDarwin())

255}

256

257

258

259

260bool X86ExpandPseudo::expandMI(MachineBasicBlock &MBB,

262 MachineInstr &MI = *MBBI;

263 unsigned Opcode = MI.getOpcode();

265#define GET_EGPR_IF_ENABLED(OPC) (STI->hasEGPR() ? OPC##_EVEX : OPC)

266 switch (Opcode) {

267 default:

268 return false;

269 case X86::TCRETURNdi:

270 case X86::TCRETURNdicc:

271 case X86::TCRETURNri:

272 case X86::TCRETURN_WIN64ri:

273 case X86::TCRETURN_HIPE32ri:

274 case X86::TCRETURNmi:

275 case X86::TCRETURNdi64:

276 case X86::TCRETURNdi64cc:

277 case X86::TCRETURNri64:

278 case X86::TCRETURNri64_ImpCall:

279 case X86::TCRETURNmi64:

280 case X86::TCRETURN_WINmi64: {

281 bool isMem = Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64 ||

282 Opcode == X86::TCRETURN_WINmi64;

283 MachineOperand &JumpTarget = MBBI->getOperand(0);

285 : 1);

286 assert(StackAdjust.isImm() && "Expecting immediate value.");

287

288

289 int StackAdj = StackAdjust.getImm();

292 assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");

293

294

295 Offset = StackAdj - MaxTCDelta;

296 assert(Offset >= 0 && "Offset should never be negative");

297

298 if (Opcode == X86::TCRETURNdicc || Opcode == X86::TCRETURNdi64cc) {

299 assert(Offset == 0 && "Conditional tail call cannot adjust the stack.");

300 }

301

303

306 }

307

308

310

311 if (Opcode == X86::TCRETURNdi || Opcode == X86::TCRETURNdicc ||

312 Opcode == X86::TCRETURNdi64 || Opcode == X86::TCRETURNdi64cc) {

313 unsigned Op;

314 switch (Opcode) {

315 case X86::TCRETURNdi:

316 Op = X86::TAILJMPd;

317 break;

318 case X86::TCRETURNdicc:

319 Op = X86::TAILJMPd_CC;

320 break;

321 case X86::TCRETURNdi64cc:

323 "Conditional tail calls confuse "

324 "the Win64 unwinder.");

325 Op = X86::TAILJMPd64_CC;

326 break;

327 default:

328

329

330 Op = X86::TAILJMPd64;

331 break;

332 }

337 } else {

341 }

342 if (Op == X86::TAILJMPd_CC || Op == X86::TAILJMPd64_CC) {

343 MIB.addImm(MBBI->getOperand(2).getImm());

344 }

345

346 } else if (Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64 ||

347 Opcode == X86::TCRETURN_WINmi64) {

348 unsigned Op = (Opcode == X86::TCRETURNmi)

349 ? X86::TAILJMPm

350 : (IsX64 ? X86::TAILJMPm64_REX : X86::TAILJMPm64);

353 MIB.add(MBBI->getOperand(i));

354 } else if (Opcode == X86::TCRETURNri64 ||

355 Opcode == X86::TCRETURNri64_ImpCall ||

356 Opcode == X86::TCRETURN_WIN64ri) {

359 TII->get(IsX64 ? X86::TAILJMPr64_REX : X86::TAILJMPr64))

360 .add(JumpTarget);

361 } else {

362 assert(!IsX64 && "Win64 and UEFI64 require REX for indirect jumps.");

365 .add(JumpTarget);

366 }

367

368 MachineInstr &NewMI = *std::prev(MBBI);

371

372

373 if (MBBI->isCandidateForAdditionalCallInfo())

375

376

378

379 return true;

380 }

381 case X86::EH_RETURN:

382 case X86::EH_RETURN64: {

383 MachineOperand &DestAddr = MBBI->getOperand(0);

384 assert(DestAddr.isReg() && "Offset should be in register!");

388 TII->get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr), StackPtr)

390

391 return true;

392 }

393 case X86::IRET: {

394

395 int64_t StackAdj = MBBI->getOperand(0).getImm();

397

398 unsigned RetOp = STI->is64Bit() ? X86::IRET64 : X86::IRET32;

399

400 if (STI->is64Bit() && STI->hasUINTR() &&

402 RetOp = X86::UIRET;

405 return true;

406 }

407 case X86::RET: {

408

409 int64_t StackAdj = MBBI->getOperand(0).getImm();

410 MachineInstrBuilder MIB;

411 if (StackAdj == 0) {

413 TII->get(STI->is64Bit() ? X86::RET64 : X86::RET32));

416 TII->get(STI->is64Bit() ? X86::RETI64 : X86::RETI32))

418 } else {

419 assert(!STI->is64Bit() &&

420 "shouldn't need to do this for x86_64 targets!");

421

422

427 }

428 for (unsigned I = 1, E = MBBI->getNumOperands(); I != E; ++I)

429 MIB.add(MBBI->getOperand(I));

431 return true;

432 }

433 case X86::LCMPXCHG16B_SAVE_RBX: {

434

435

436

437

438

439

440 const MachineOperand &InArg = MBBI->getOperand(6);

441 Register SaveRbx = MBBI->getOperand(7).getReg();

442

443

444

445

446

448

449 MachineInstr *NewInstr = BuildMI(MBB, MBBI, DL, TII->get(X86::LCMPXCHG16B));

450

451

452 const MachineOperand &Base = MBBI->getOperand(1);

453 if (Base.getReg() == X86::RBX || Base.getReg() == X86::EBX)

455 Base.getReg() == X86::RBX

456 ? SaveRbx

457 : Register(TRI->getSubReg(SaveRbx, X86::sub_32bit)),

458 false));

459 else

463

465 true);

466

467

469 return true;

470 }

471

472

473

474

475

476

477

478

479 case X86::MASKPAIR16LOAD: {

481 assert(Disp >= 0 && Disp <= INT32_MAX - 2 && "Unexpected displacement");

483 bool DstIsDead = MBBI->getOperand(0).isDead();

484 Register Reg0 = TRI->getSubReg(Reg, X86::sub_mask_0);

485 Register Reg1 = TRI->getSubReg(Reg, X86::sub_mask_1);

486

487 auto MIBLo =

490 auto MIBHi =

493

495 MIBLo.add(MBBI->getOperand(1 + i));

497 MIBHi.addImm(Disp + 2);

498 else

499 MIBHi.add(MBBI->getOperand(1 + i));

500 }

501

502

503 MachineMemOperand *OldMMO = MBBI->memoperands().front();

507

508 MIBLo.setMemRefs(MMOLo);

509 MIBHi.setMemRefs(MMOHi);

510

511

513 return true;

514 }

515 case X86::MASKPAIR16STORE: {

517 assert(Disp >= 0 && Disp <= INT32_MAX - 2 && "Unexpected displacement");

520 Register Reg0 = TRI->getSubReg(Reg, X86::sub_mask_0);

521 Register Reg1 = TRI->getSubReg(Reg, X86::sub_mask_1);

522

523 auto MIBLo =

525 auto MIBHi =

527

529 MIBLo.add(MBBI->getOperand(i));

531 MIBHi.addImm(Disp + 2);

532 else

533 MIBHi.add(MBBI->getOperand(i));

534 }

537

538

539 MachineMemOperand *OldMMO = MBBI->memoperands().front();

543

544 MIBLo.setMemRefs(MMOLo);

545 MIBHi.setMemRefs(MMOHi);

546

547

549 return true;

550 }

551 case X86::MWAITX_SAVE_RBX: {

552

553

554

555

556

557

558 const MachineOperand &InArg = MBBI->getOperand(1);

559

560

562

564

565 Register SaveRbx = MBBI->getOperand(2).getReg();

567

569 return true;

570 }

571 case TargetOpcode::ICALL_BRANCH_FUNNEL:

572 expandICallBranchFunnel(&MBB, MBBI);

573 return true;

574 case X86::PLDTILECFGV: {

576 return true;

577 }

578 case X86::PTILELOADDV:

579 case X86::PTILELOADDT1V:

580 case X86::PTILELOADDRSV:

581 case X86::PTILELOADDRST1V:

582 case X86::PTCVTROWD2PSrreV:

583 case X86::PTCVTROWD2PSrriV:

584 case X86::PTCVTROWPS2BF16HrreV:

585 case X86::PTCVTROWPS2BF16HrriV:

586 case X86::PTCVTROWPS2BF16LrreV:

587 case X86::PTCVTROWPS2BF16LrriV:

588 case X86::PTCVTROWPS2PHHrreV:

589 case X86::PTCVTROWPS2PHHrriV:

590 case X86::PTCVTROWPS2PHLrreV:

591 case X86::PTCVTROWPS2PHLrriV:

592 case X86::PTILEMOVROWrreV:

593 case X86::PTILEMOVROWrriV: {

594 for (unsigned i = 2; i > 0; --i)

595 MI.removeOperand(i);

596 unsigned Opc;

597 switch (Opcode) {

598 case X86::PTILELOADDRSV:

600 break;

601 case X86::PTILELOADDRST1V:

603 break;

604 case X86::PTILELOADDV:

606 break;

607 case X86::PTILELOADDT1V:

609 break;

610 case X86::PTCVTROWD2PSrreV:

611 Opc = X86::TCVTROWD2PSrte;

612 break;

613 case X86::PTCVTROWD2PSrriV:

614 Opc = X86::TCVTROWD2PSrti;

615 break;

616 case X86::PTCVTROWPS2BF16HrreV:

617 Opc = X86::TCVTROWPS2BF16Hrte;

618 break;

619 case X86::PTCVTROWPS2BF16HrriV:

620 Opc = X86::TCVTROWPS2BF16Hrti;

621 break;

622 case X86::PTCVTROWPS2BF16LrreV:

623 Opc = X86::TCVTROWPS2BF16Lrte;

624 break;

625 case X86::PTCVTROWPS2BF16LrriV:

626 Opc = X86::TCVTROWPS2BF16Lrti;

627 break;

628 case X86::PTCVTROWPS2PHHrreV:

629 Opc = X86::TCVTROWPS2PHHrte;

630 break;

631 case X86::PTCVTROWPS2PHHrriV:

632 Opc = X86::TCVTROWPS2PHHrti;

633 break;

634 case X86::PTCVTROWPS2PHLrreV:

635 Opc = X86::TCVTROWPS2PHLrte;

636 break;

637 case X86::PTCVTROWPS2PHLrriV:

638 Opc = X86::TCVTROWPS2PHLrti;

639 break;

640 case X86::PTILEMOVROWrreV:

641 Opc = X86::TILEMOVROWrte;

642 break;

643 case X86::PTILEMOVROWrriV:

644 Opc = X86::TILEMOVROWrti;

645 break;

646 default:

648 }

650 return true;

651 }

652 case X86::PTCMMIMFP16PSV:

653 case X86::PTCMMRLFP16PSV:

654 case X86::PTDPBSSDV:

655 case X86::PTDPBSUDV:

656 case X86::PTDPBUSDV:

657 case X86::PTDPBUUDV:

658 case X86::PTDPBF16PSV:

659 case X86::PTDPFP16PSV:

660 case X86::PTMMULTF32PSV:

661 case X86::PTDPBF8PSV:

662 case X86::PTDPBHF8PSV:

663 case X86::PTDPHBF8PSV:

664 case X86::PTDPHF8PSV: {

665 MI.untieRegOperand(4);

666 for (unsigned i = 3; i > 0; --i)

667 MI.removeOperand(i);

668 unsigned Opc;

669 switch (Opcode) {

670

671 case X86::PTCMMIMFP16PSV: Opc = X86::TCMMIMFP16PS; break;

672 case X86::PTCMMRLFP16PSV: Opc = X86::TCMMRLFP16PS; break;

673 case X86::PTDPBSSDV: Opc = X86::TDPBSSD; break;

674 case X86::PTDPBSUDV: Opc = X86::TDPBSUD; break;

675 case X86::PTDPBUSDV: Opc = X86::TDPBUSD; break;

676 case X86::PTDPBUUDV: Opc = X86::TDPBUUD; break;

677 case X86::PTDPBF16PSV: Opc = X86::TDPBF16PS; break;

678 case X86::PTDPFP16PSV: Opc = X86::TDPFP16PS; break;

679 case X86::PTMMULTF32PSV: Opc = X86::TMMULTF32PS; break;

680 case X86::PTDPBF8PSV: Opc = X86::TDPBF8PS; break;

681 case X86::PTDPBHF8PSV: Opc = X86::TDPBHF8PS; break;

682 case X86::PTDPHBF8PSV: Opc = X86::TDPHBF8PS; break;

683 case X86::PTDPHF8PSV: Opc = X86::TDPHF8PS; break;

684

685 default:

687 }

689 MI.tieOperands(0, 1);

690 return true;

691 }

692 case X86::PTILESTOREDV: {

693 for (int i = 1; i >= 0; --i)

694 MI.removeOperand(i);

696 return true;

697 }

698#undef GET_EGPR_IF_ENABLED

699 case X86::PTILEZEROV: {

700 for (int i = 2; i > 0; --i)

701 MI.removeOperand(i);

702 MI.setDesc(TII->get(X86::TILEZERO));

703 return true;

704 }

705 case X86::CALL64pcrel32_RVMARKER:

706 case X86::CALL64r_RVMARKER:

707 case X86::CALL64m_RVMARKER:

708 expandCALL_RVMARKER(MBB, MBBI);

709 return true;

710 case X86::CALL64r_ImpCall:

711 MI.setDesc(TII->get(X86::CALL64r));

712 return true;

713 case X86::ADD32mi_ND:

714 case X86::ADD64mi32_ND:

715 case X86::SUB32mi_ND:

716 case X86::SUB64mi32_ND:

717 case X86::AND32mi_ND:

718 case X86::AND64mi32_ND:

719 case X86::OR32mi_ND:

720 case X86::OR64mi32_ND:

721 case X86::XOR32mi_ND:

722 case X86::XOR64mi32_ND:

723 case X86::ADC32mi_ND:

724 case X86::ADC64mi32_ND:

725 case X86::SBB32mi_ND:

726 case X86::SBB64mi32_ND: {

727

728

729

730

731

732

733

734

735

736

737

738

739

740

741

742

743 const MachineOperand &ImmOp =

744 MI.getOperand(MI.getNumExplicitOperands() - 1);

745

747 return false;

749 const MachineOperand &DispOp = MI.getOperand(MemOpNo + X86::AddrDisp);

751

753 return false;

754

759 if (X86MCRegisterClasses[X86::GR32RegClassID].contains(Base) ||

760 X86MCRegisterClasses[X86::GR32RegClassID].contains(Index))

763 return false;

764 unsigned Opc, LoadOpc;

765 switch (Opcode) {

766#define MI_TO_RI(OP) \

767 case X86::OP##32mi_ND: \

768 Opc = X86::OP##32ri; \

769 LoadOpc = X86::MOV32rm; \

770 break; \

771 case X86::OP##64mi32_ND: \

772 Opc = X86::OP##64ri32; \

773 LoadOpc = X86::MOV64rm; \

774 break;

775

776 default:

785#undef MI_TO_RI

786 }

787

788 Register DestReg = MI.getOperand(0).getReg();

791 .add(ImmOp);

792

793 for (unsigned I = MI.getNumImplicitOperands() + 1; I != 0; --I)

794 MI.removeOperand(MI.getNumOperands() - 1);

795 MI.setDesc(TII->get(LoadOpc));

796 return true;

797 }

798 }

800}

801

802

803

804

805

806

807

808

809

810

811

812

813

814

815

816void X86ExpandPseudo::expandVastartSaveXmmRegs(

817 MachineBasicBlock *EntryBlk,

819 assert(VAStartPseudoInstr->getOpcode() == X86::VASTART_SAVE_XMM_REGS);

820

823 const DebugLoc &DL = VAStartPseudoInstr->getDebugLoc();

824 Register CountReg = VAStartPseudoInstr->getOperand(0).getReg();

825

826

829

830 LiveRegs.addLiveIns(*EntryBlk);

831 for (MachineInstr &MI : EntryBlk->instrs()) {

832 if (MI.getOpcode() == VAStartPseudoInstr->getOpcode())

833 break;

834

835 LiveRegs.stepForward(MI, Clobbers);

836 }

837

838

839

840

843 MachineBasicBlock *GuardedRegsBlk = Func->CreateMachineBasicBlock(LLVMBlk);

844 MachineBasicBlock *TailBlk = Func->CreateMachineBasicBlock(LLVMBlk);

845 Func->insert(EntryBlkIter, GuardedRegsBlk);

846 Func->insert(EntryBlkIter, TailBlk);

847

848

849 TailBlk->splice(TailBlk->begin(), EntryBlk,

851 EntryBlk->end());

853

854 uint64_t FrameOffset = VAStartPseudoInstr->getOperand(4).getImm();

855 uint64_t VarArgsRegsOffset = VAStartPseudoInstr->getOperand(6).getImm();

856

857

858 unsigned MOVOpc = STI->hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;

859

860

861 for (int64_t OpndIdx = 7, RegIdx = 0;

862 OpndIdx < VAStartPseudoInstr->getNumOperands() - 1;

863 OpndIdx++, RegIdx++) {

864 auto NewMI = BuildMI(GuardedRegsBlk, DL, TII->get(MOVOpc));

867 NewMI.addImm(FrameOffset + VarArgsRegsOffset + RegIdx * 16);

868 else

869 NewMI.add(VAStartPseudoInstr->getOperand(i + 1));

870 }

871 NewMI.addReg(VAStartPseudoInstr->getOperand(OpndIdx).getReg());

872 assert(VAStartPseudoInstr->getOperand(OpndIdx).getReg().isPhysical());

873 }

874

875

877

879

881

882 BuildMI(EntryBlk, DL, TII->get(X86::TEST8rr))

889 }

890

891

892 addLiveIns(*GuardedRegsBlk, LiveRegs);

894

895

896 VAStartPseudoInstr->eraseFromParent();

897}

898

899

900

901bool X86ExpandPseudo::expandMBB(MachineBasicBlock &MBB) {

903

904

906 while (MBBI != E) {

909 MBBI = NMBBI;

910 }

911

913}

914

915bool X86ExpandPseudo::expandPseudosWhichAffectControlFlow(MachineFunction &MF) {

916

917

918

919 for (MachineInstr &Instr : MF.front().instrs()) {

920 if (Instr.getOpcode() == X86::VASTART_SAVE_XMM_REGS) {

921 expandVastartSaveXmmRegs(&(MF.front()), Instr);

922 return true;

923 }

924 }

925

926 return false;

927}

928

929bool X86ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {

933 X86FI = MF.getInfo();

935

936 bool Modified = expandPseudosWhichAffectControlFlow(MF);

937

938 for (MachineBasicBlock &MBB : MF)

941}

942

943

945 return new X86ExpandPseudo();

946}

assert(UImm && (UImm != ~static_cast<T>(0)) && "Invalid immediate!")

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

MachineBasicBlock MachineBasicBlock::iterator MBBI

static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")

const HexagonInstrInfo * TII

This file implements the LivePhysRegs utility for tracking liveness of physical registers.

Register const TargetRegisterInfo * TRI

Promote Memory to Register

#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)

static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)

static Target * FirstTarget

#define GET_EGPR_IF_ENABLED(OPC)

#define X86_EXPAND_PSEUDO_NAME

Definition X86ExpandPseudo.cpp:30

Represent the analysis usage information of a pass.

AnalysisUsage & addPreservedID(const void *ID)

LLVM_ABI void setPreservesCFG()

This function should be called by the pass, iff they do not:

LLVM Basic Block Representation.

FunctionPass class - This class is used to implement most global optimizations.

void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override

Emit instructions to copy a pair of physical registers.

LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)

Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...

const BasicBlock * getBasicBlock() const

Return the LLVM basic block that this instance corresponded to originally.

LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())

Add Succ as a successor of this MachineBasicBlock.

LLVM_ABI void eraseFromParent()

This method unlinks 'this' from the containing function and deletes it.

const MachineFunction * getParent() const

Return the MachineFunction containing this basic block.

LLVM_ABI instr_iterator erase(instr_iterator I)

Remove an instruction from the instruction list and delete it.

void splice(iterator Where, MachineBasicBlock *Other, iterator From)

Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...

MachineInstrBundleIterator< MachineInstr > iterator

MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...

void getAnalysisUsage(AnalysisUsage &AU) const override

getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.

Properties which a MachineFunction may have at a given point in time.

void moveAdditionalCallInfo(const MachineInstr *Old, const MachineInstr *New)

Move the call site info from Old to New call site info.

const TargetSubtargetInfo & getSubtarget() const

getSubtarget - Return the subtarget for which this machine code is being compiled.

MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)

getMachineMemOperand - Allocate a new MachineMemOperand.

BasicBlockListType::iterator iterator

Ty * getInfo()

getInfo - Keep track of various per-function pieces of information for backends that would like to do...

const MachineBasicBlock & front() const

MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)

CreateMachineInstr - Allocate a new MachineInstr.

void insert(iterator MBBI, MachineBasicBlock *MBB)

const TargetMachine & getTarget() const

getTarget - Return the target machine this machine code is compiled with

const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const

const MachineInstrBuilder & addImm(int64_t Val) const

Add a new immediate operand.

const MachineInstrBuilder & add(const MachineOperand &MO) const

const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const

const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const

const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const

Add a new virtual register operand.

const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const

MachineInstr * getInstr() const

If conversion operators fail, use this method to get the MachineInstr explicitly.

Representation of each machine instruction.

LLVM_ABI void setCFIType(MachineFunction &MF, uint32_t Type)

Set the CFI type for the instruction.

unsigned getNumOperands() const

Retuns the total number of operands.

LLVM_ABI void addOperand(MachineFunction &MF, const MachineOperand &Op)

Add the specified operand to the instruction.

LLVM_ABI void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI)

Copy implicit register operands from specified instruction to this instruction.

const DebugLoc & getDebugLoc() const

Returns the debug location id of this MachineInstr.

const MachineOperand & getOperand(unsigned i) const

MachineOperand class - Representation of each machine instruction operand.

const GlobalValue * getGlobal() const

bool isReg() const

isReg - Tests if this is a MO_Register operand.

bool isImm() const

isImm - Tests if this is a MO_Immediate operand.

bool isSymbol() const

isSymbol - Tests if this is a MO_ExternalSymbol operand.

void setIsKill(bool Val=true)

unsigned getTargetFlags() const

bool isGlobal() const

isGlobal - Tests if this is a MO_GlobalAddress operand.

const char * getSymbolName() const

Register getReg() const

getReg - Returns the register number.

static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)

int64_t getOffset() const

Return the offset from the symbol in this operand.

StringRef - Represent a constant reference to a string, i.e.

CodeModel::Model getCodeModel() const

Returns the code model.

Target - Wrapper for Target specific information.

bool isOSWindows() const

Tests whether the OS is Windows.

int64_t mergeSPAdd(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, int64_t AddOffset, bool doMergeWithPrevious) const

Equivalent to: mergeSPUpdates(MBB, MBBI, [AddOffset](int64_t Offset) { return AddOffset + Offset; }...

void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, const DebugLoc &DL, int64_t NumBytes, bool InEpilogue) const

Emit a series of instructions to increment / decrement the stack pointer by a constant value.

X86MachineFunctionInfo - This class is derived from MachineFunction and contains private X86 target-s...

int getTCReturnAddrDelta() const

bool isTargetWin64() const

bool isTarget64BitLP64() const

Is this x86_64 with the LP64 programming model (standard AMD64, no x32)?

const Triple & getTargetTriple() const

const X86InstrInfo * getInstrInfo() const override

bool isCallingConvWin64(CallingConv::ID CC) const

bool isTargetUEFI64() const

const X86RegisterInfo * getRegisterInfo() const override

const X86FrameLowering * getFrameLowering() const override

self_iterator getIterator()

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

unsigned ID

LLVM IR allows to use arbitrary numbers as calling convention identifiers.

@ BasicBlock

Various leaf nodes.

@ Implicit

Not emitted register (e.g. carry, or temporary result).

@ Define

Register definition.

@ X86

Windows x64, Windows Itanium (IA-64)

bool needSIB(MCRegister BaseReg, MCRegister IndexReg, bool In64BitMode)

int getFirstAddrOperandIdx(const MachineInstr &MI)

Return the index of the instruction's first address operand, if it has a memory reference,...

NodeAddr< InstrNode * > Instr

NodeAddr< FuncNode * > Func

This is an optimization pass for GlobalISel generic memory operations.

auto drop_begin(T &&RangeOrContainer, size_t N=1)

Return a range covering RangeOrContainer with the first N elements excluded.

LLVM_ABI void finalizeBundle(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)

finalizeBundle - Finalize a machine instruction bundle which includes a sequence of instructions star...

MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)

Builder interface. Specify how to create the initial instruction itself.

constexpr bool isInt(int64_t x)

Checks if an integer fits into the given bit width.

static bool isMem(const MachineInstr &MI, unsigned Op)

LLVM_ABI char & MachineDominatorsID

MachineDominators - This pass is a machine dominators analysis pass.

unsigned getDeadRegState(bool B)

LLVM_ABI char & MachineLoopInfoID

MachineLoopInfo - This pass is a loop analysis pass.

FunctionPass * createX86ExpandPseudoPass()

Return a Machine IR pass that expands X86-specific pseudo instructions into a sequence of actual inst...

Definition X86ExpandPseudo.cpp:944

FunctionAddr VTableAddr Count

constexpr bool isUInt(uint64_t x)

Checks if an unsigned integer fits into the given bit width.

class LLVM_GSL_OWNER SmallVector

Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...

unsigned getKillRegState(bool B)

DWARFExpression::Operation Op

void addLiveIns(MachineBasicBlock &MBB, const LivePhysRegs &LiveRegs)

Adds registers contained in LiveRegs to the block live-in list of MBB.