LLVM: lib/Target/X86/GISel/X86InstructionSelector.cpp Source File


40#include "llvm/IR/IntrinsicsX86.h"

46#include

47#include

48#include

49

50#define DEBUG_TYPE "X86-isel"

51

52using namespace llvm;

53

54namespace {

55

56#define GET_GLOBALISEL_PREDICATE_BITSET

57#include "X86GenGlobalISel.inc"

58#undef GET_GLOBALISEL_PREDICATE_BITSET
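// Anonymous namespace holding the X86InstructionSelector class. It derives
// from the generic InstructionSelector and pulls the tablegen'erated
// predicate and temporary declarations in from X86GenGlobalISel.inc.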

59

61public:

64

67

68private:

69

70

72

73

74 unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,

75 Align Alignment) const;

76

77 unsigned getPtrLoadStoreOp(const LLT &Ty, const RegisterBank &RB,

78 unsigned Opc) const;

79

124

125 ComplexRendererFns selectAddr(MachineOperand &Root) const;

126

127

130

133

137

143

144#define GET_GLOBALISEL_PREDICATES_DECL

145#include "X86GenGlobalISel.inc"

146#undef GET_GLOBALISEL_PREDICATES_DECL

147

148#define GET_GLOBALISEL_TEMPORARIES_DECL

149#include "X86GenGlobalISel.inc"

150#undef GET_GLOBALISEL_TEMPORARIES_DECL

151};

152

153}

154

155#define GET_GLOBALISEL_IMPL

156#include "X86GenGlobalISel.inc"

157#undef GET_GLOBALISEL_IMPL
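// Constructor: capture the target machine, subtarget, instruction/register
// info and register-bank info, and run the tablegen'erated predicate and
// temporary initializers from X86GenGlobalISel.inc.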

158

159X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,

162 : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),

163 RBI(RBI),

165#include "X86GenGlobalISel.inc"

168#include "X86GenGlobalISel.inc"

170{

171}

172

173

174

176X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {

177 if (RB.getID() == X86::GPRRegBankID) {

179 return &X86::GR8RegClass;

181 return &X86::GR16RegClass;

183 return &X86::GR32RegClass;

185 return &X86::GR64RegClass;

186 }

187 if (RB.getID() == X86::VECRRegBankID) {

189 return STI.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;

191 return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;

193 return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;

195 return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;

197 return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;

199 return &X86::VR512RegClass;

200 }

201

202 if (RB.getID() == X86::PSRRegBankID) {

204 return &X86::RFP80RegClass;

206 return &X86::RFP64RegClass;

208 return &X86::RFP32RegClass;

209 }

210

212}

213

214const TargetRegisterClass *

215X86InstructionSelector::getRegClass(LLT Ty, Register Reg,

216 MachineRegisterInfo &MRI) const {

219}

220

221static unsigned getSubRegIndex(const TargetRegisterClass *RC) {

222 unsigned SubIdx = X86::NoSubRegister;

223 if (RC == &X86::GR32RegClass) {

224 SubIdx = X86::sub_32bit;

225 } else if (RC == &X86::GR16RegClass) {

226 SubIdx = X86::sub_16bit;

227 } else if (RC == &X86::GR8RegClass) {

228 SubIdx = X86::sub_8bit;

229 }

230

231 return SubIdx;

232}
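// getRegClassFromGRPhysReg: map a general-purpose physical register to the
// narrowest GR64/GR32/GR16/GR8 class containing it.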

233

237 return &X86::GR64RegClass;

239 return &X86::GR32RegClass;

241 return &X86::GR16RegClass;

243 return &X86::GR8RegClass;

244

246}
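// selectDebugInstr: give any virtual-register operands of debug instructions
// a register class; an operand with an unexpected size/bank only produces a
// warning, it never fails selection.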

247

248

249

250

251bool X86InstructionSelector::selectDebugInstr(MachineInstr &I,

252 MachineRegisterInfo &MRI) const {

253 for (MachineOperand &MO : I.operands()) {

254 if (!MO.isReg())

255 continue;

257 if (Reg)

258 continue;

260 continue;

261 LLT Ty = MRI.getType(Reg);

263 const TargetRegisterClass *RC =

265 if (!RC) {

268 if (!RC) {

270 dbgs() << "Warning: DBG_VALUE operand has unexpected size/bank\n");

271 break;

272 }

273 }

275 }

276

277 return true;

278}
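// selectCopy: constrain both sides of a COPY according to their register
// banks and sizes, inserting SUBREG_TO_REG or subregister copies for the
// widening GPR case and for 16-bit GPR <-> vector-bank moves.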

279

280

281bool X86InstructionSelector::selectCopy(MachineInstr &I,

282 MachineRegisterInfo &MRI) const {

283 Register DstReg = I.getOperand(0).getReg();

285 const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

286

287 Register SrcReg = I.getOperand(1).getReg();

289 const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

290

292 assert(I.isCopy() && "Generic operators do not allow physical registers");

293

294 if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&

295 DstRegBank.getID() == X86::GPRRegBankID) {

296

297 const TargetRegisterClass *SrcRC =

300

301 if (SrcRC != DstRC) {

302

303 Register ExtSrc = MRI.createVirtualRegister(DstRC);

304 BuildMI(*I.getParent(), I, I.getDebugLoc(),

305 TII.get(TargetOpcode::SUBREG_TO_REG))

309 .addImm(getSubRegIndex(SrcRC));

310

311 I.getOperand(1).setReg(ExtSrc);

312 }

313 }

314

315

316 if (SrcSize == 16 && SrcRegBank.getID() == X86::GPRRegBankID &&

317 (DstRegBank.getID() == X86::VECRRegBankID)) {

318

320

321

322 Register ExtReg = MRI.createVirtualRegister(&X86::GR32RegClass);

323 BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::SUBREG_TO_REG),

324 ExtReg)

327 .addImm(X86::sub_16bit);

328

329

330 BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::COPY), DstReg)

332

333 I.eraseFromParent();

334 }

335

336

337 if (DstSize == 16 && DstRegBank.getID() == X86::GPRRegBankID &&

338 (SrcRegBank.getID() == X86::VECRRegBankID)) {

339

341

342

343 Register Temp32 = MRI.createVirtualRegister(&X86::GR32RegClass);

344 BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::COPY), Temp32)

346

347

348 if (Register Dst32 = TRI.getMatchingSuperReg(DstReg, X86::sub_16bit,

349 &X86::GR32RegClass)) {

350

351 BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::COPY), Dst32)

353 } else {

354

355 BuildMI(*I.getParent(), I, DL, TII.get(TargetOpcode::COPY), DstReg)

356 .addReg(Temp32, 0, X86::sub_16bit);

357 }

358

359 I.eraseFromParent();

360 }

361

362 return true;

363 }

364

366 "No phys reg on generic operators");

367 assert((DstSize == SrcSize ||

368

369

372 "Copy with different width?!");

373

374 const TargetRegisterClass *DstRC =

376

377 if (SrcRegBank.getID() == X86::GPRRegBankID &&

378 DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&

380

381

383

384 if (DstRC != SrcRC) {

385 I.getOperand(1).setSubReg(getSubRegIndex(DstRC));

386 I.getOperand(1).substPhysReg(SrcReg, TRI);

387 }

388 }

389

390

391

392

393 const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);

397 << " operand\n");

398 return false;

399 }

400 }

401 I.setDesc(TII.get(X86::COPY));

402 return true;

403}
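// Main entry point. Non-generic instructions only need COPY/debug handling;
// generic instructions are first offered to the tablegen'erated selectImpl(),
// and anything it rejects falls through to the hand-written per-opcode
// helpers in the switch below.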

404

405bool X86InstructionSelector::select(MachineInstr &I) {

406 assert(I.getParent() && "Instruction should be in a basic block!");

407 assert(I.getParent()->getParent() && "Instruction should be in a function!");

408

412

413 unsigned Opcode = I.getOpcode();

415

416

417 if (Opcode == TargetOpcode::LOAD_STACK_GUARD)

418 return false;

419

420 if (I.isCopy())

422

423 if (I.isDebugInstr())

425

426 return true;

427 }

428

429 assert(I.getNumOperands() == I.getNumExplicitOperands() &&

430 "Generic instruction has unexpected implicit operands\n");

431

432 if (selectImpl(I, *CoverageInfo))

433 return true;

434

436

437

438 switch (I.getOpcode()) {

439 default:

440 return false;

441 case TargetOpcode::G_STORE:

442 case TargetOpcode::G_LOAD:

444 case TargetOpcode::G_PTR_ADD:

445 case TargetOpcode::G_FRAME_INDEX:

446 return selectFrameIndexOrGep(I, MRI, MF);

447 case TargetOpcode::G_GLOBAL_VALUE:

448 return selectGlobalValue(I, MRI, MF);

449 case TargetOpcode::G_CONSTANT:

450 return selectConstant(I, MRI, MF);

451 case TargetOpcode::G_FCONSTANT:

452 return materializeFP(I, MRI, MF);

453 case TargetOpcode::G_PTRTOINT:

454 case TargetOpcode::G_TRUNC:

455 return selectTruncOrPtrToInt(I, MRI, MF);

456 case TargetOpcode::G_INTTOPTR:

457 case TargetOpcode::G_FREEZE:

459 case TargetOpcode::G_ZEXT:

460 return selectZext(I, MRI, MF);

461 case TargetOpcode::G_ANYEXT:

462 return selectAnyext(I, MRI, MF);

463 case TargetOpcode::G_ICMP:

464 return selectCmp(I, MRI, MF);

465 case TargetOpcode::G_FCMP:

466 return selectFCmp(I, MRI, MF);

467 case TargetOpcode::G_UADDE:

468 case TargetOpcode::G_UADDO:

469 case TargetOpcode::G_USUBE:

470 case TargetOpcode::G_USUBO:

471 return selectUAddSub(I, MRI, MF);

472 case TargetOpcode::G_UNMERGE_VALUES:

474 case TargetOpcode::G_MERGE_VALUES:

475 case TargetOpcode::G_CONCAT_VECTORS:

477 case TargetOpcode::G_EXTRACT:

478 return selectExtract(I, MRI, MF);

479 case TargetOpcode::G_INSERT:

480 return selectInsert(I, MRI, MF);

481 case TargetOpcode::G_BRCOND:

482 return selectCondBranch(I, MRI, MF);

483 case TargetOpcode::G_IMPLICIT_DEF:

484 case TargetOpcode::G_PHI:

485 return selectImplicitDefOrPHI(I, MRI);

486 case TargetOpcode::G_MUL:

487 case TargetOpcode::G_SMULH:

488 case TargetOpcode::G_UMULH:

489 case TargetOpcode::G_SDIV:

490 case TargetOpcode::G_UDIV:

491 case TargetOpcode::G_SREM:

492 case TargetOpcode::G_UREM:

493 return selectMulDivRem(I, MRI, MF);

494 case TargetOpcode::G_SELECT:

495 return selectSelect(I, MRI, MF);

496 }

497

498 return false;

499}
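// getPtrLoadStoreOp: pointer-typed loads/stores on the GPR bank map to
// MOV32rm/MOV32mr or MOV64rm/MOV64mr; everything else keeps the generic
// opcode.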

500

501unsigned X86InstructionSelector::getPtrLoadStoreOp(const LLT &Ty,

502 const RegisterBank &RB,

503 unsigned Opc) const {

504 assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&

505 "Only G_STORE and G_LOAD are expected for selection");

506 if (Ty.isPointer() && X86::GPRRegBankID == RB.getID()) {

507 bool IsLoad = (Opc == TargetOpcode::G_LOAD);

509 default:

510 break;

511 case 32:

512 return IsLoad ? X86::MOV32rm : X86::MOV32mr;

513 case 64:

514 return IsLoad ? X86::MOV64rm : X86::MOV64mr;

515 }

516 }

517 return Opc;

518}
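// getLoadStoreOp: choose a concrete scalar/vector load or store opcode based
// on the register bank, type size, available ISA extensions (AVX/AVX512/VLX)
// and the access alignment (aligned MOVAPS-style vs. unaligned MOVUPS-style
// moves for vectors).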

519

520unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,

521 const RegisterBank &RB,

522 unsigned Opc,

523 Align Alignment) const {

524 bool Isload = (Opc == TargetOpcode::G_LOAD);

525 bool HasAVX = STI.hasAVX();

526 bool HasAVX512 = STI.hasAVX512();

527 bool HasVLX = STI.hasVLX();

528

530 if (X86::GPRRegBankID == RB.getID())

531 return Isload ? X86::MOV8rm : X86::MOV8mr;

533 if (X86::GPRRegBankID == RB.getID())

534 return Isload ? X86::MOV16rm : X86::MOV16mr;

536 if (X86::GPRRegBankID == RB.getID())

537 return Isload ? X86::MOV32rm : X86::MOV32mr;

538 if (X86::VECRRegBankID == RB.getID())

539 return Isload ? (HasAVX512 ? X86::VMOVSSZrm_alt :

540 HasAVX ? X86::VMOVSSrm_alt :

541 X86::MOVSSrm_alt)

542 : (HasAVX512 ? X86::VMOVSSZmr :

543 HasAVX ? X86::VMOVSSmr :

544 X86::MOVSSmr);

545 if (X86::PSRRegBankID == RB.getID())

546 return Isload ? X86::LD_Fp32m : X86::ST_Fp32m;

548 if (X86::GPRRegBankID == RB.getID())

549 return Isload ? X86::MOV64rm : X86::MOV64mr;

550 if (X86::VECRRegBankID == RB.getID())

551 return Isload ? (HasAVX512 ? X86::VMOVSDZrm_alt :

552 HasAVX ? X86::VMOVSDrm_alt :

553 X86::MOVSDrm_alt)

554 : (HasAVX512 ? X86::VMOVSDZmr :

555 HasAVX ? X86::VMOVSDmr :

556 X86::MOVSDmr);

557 if (X86::PSRRegBankID == RB.getID())

558 return Isload ? X86::LD_Fp64m : X86::ST_Fp64m;

560 return Isload ? X86::LD_Fp80m : X86::ST_FpP80m;

562 if (Alignment >= Align(16))

563 return Isload ? (HasVLX ? X86::VMOVAPSZ128rm

564 : HasAVX512

565 ? X86::VMOVAPSZ128rm_NOVLX

566 : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)

567 : (HasVLX ? X86::VMOVAPSZ128mr

568 : HasAVX512

569 ? X86::VMOVAPSZ128mr_NOVLX

570 : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);

571 else

572 return Isload ? (HasVLX ? X86::VMOVUPSZ128rm

573 : HasAVX512

574 ? X86::VMOVUPSZ128rm_NOVLX

575 : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)

576 : (HasVLX ? X86::VMOVUPSZ128mr

577 : HasAVX512

578 ? X86::VMOVUPSZ128mr_NOVLX

579 : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);

581 if (Alignment >= Align(32))

582 return Isload ? (HasVLX ? X86::VMOVAPSZ256rm

583 : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX

584 : X86::VMOVAPSYrm)

585 : (HasVLX ? X86::VMOVAPSZ256mr

586 : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX

587 : X86::VMOVAPSYmr);

588 else

589 return Isload ? (HasVLX ? X86::VMOVUPSZ256rm

590 : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX

591 : X86::VMOVUPSYrm)

592 : (HasVLX ? X86::VMOVUPSZ256mr

593 : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX

594 : X86::VMOVUPSYmr);

596 if (Alignment >= Align(64))

597 return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;

598 else

599 return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;

600 }

601 return Opc;

602}
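// X86SelectAddress: fold the address-defining instruction (G_FRAME_INDEX,
// G_PTR_ADD with a 32-bit constant offset, G_GLOBAL_VALUE, G_CONSTANT_POOL)
// into an X86AddressMode, falling back to a plain base register otherwise.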

603

604

608 assert(I.getOperand(0).isReg() && "unsupported operand.");

609 assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&

610 "unsupported type.");

611

612 switch (I.getOpcode()) {

613 default:

614 break;

615 case TargetOpcode::G_FRAME_INDEX:

618 return true;

619 case TargetOpcode::G_PTR_ADD: {

621 int64_t Imm = *COff;

622 if (isInt<32>(Imm)) {

623 AM.Disp = static_cast<int32_t>(Imm);

624 AM.Base.Reg = I.getOperand(1).getReg();

625 return true;

626 }

627 }

628 break;

629 }

630 case TargetOpcode::G_GLOBAL_VALUE: {

631 auto GV = I.getOperand(1).getGlobal();

632 if (GV->isThreadLocal()) {

633 return false;

634 }

635

637 return false;

638 AM.GV = GV;

640

641

643 return false;

644

645

647 return false;

648

650

652 "RIP-relative addresses can't have additional register operands");

654 }

655 return true;

656 }

657 case TargetOpcode::G_CONSTANT_POOL: {

658

660 return false;

661

665 else if (STI.is64Bit())

667 AM.CP = true;

668 AM.Disp = I.getOperand(1).getIndex();

669 return true;

670 }

671 }

672

673 AM.Base.Reg = I.getOperand(0).getReg();

674 return true;

675}
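// selectLoadStoreOp: reject unsupported atomic orderings and unaligned
// atomics, swap G_LOAD/G_STORE for the concrete opcode, and expand the folded
// X86AddressMode into the instruction's memory operands.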

676

677bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,

678 MachineRegisterInfo &MRI,

679 MachineFunction &MF) const {

680 unsigned Opc = I.getOpcode();

681

682 assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&

683 "Only G_STORE and G_LOAD are expected for selection");

684

685 const Register DefReg = I.getOperand(0).getReg();

686 LLT Ty = MRI.getType(DefReg);

688

689 assert(I.hasOneMemOperand());

690 auto &MemOp = **I.memoperands_begin();

691 if (MemOp.isAtomic()) {

692

693

694

695

696

697 if (!MemOp.isUnordered()) {

698 LLVM_DEBUG(dbgs() << "Atomic ordering not supported yet\n");

699 return false;

700 }

702 LLVM_DEBUG(dbgs() << "Unaligned atomics not supported yet\n");

703 return false;

704 }

705 }

706

707 unsigned NewOpc = getPtrLoadStoreOp(Ty, RB, Opc);

708 if (NewOpc == Opc)

709 return false;

710

711 I.setDesc(TII.get(NewOpc));

712 MachineInstrBuilder MIB(MF, I);

713 MachineInstr *Ptr = MRI.getVRegDef(I.getOperand(1).getReg());

714

715 X86AddressMode AM;

717 return false;

718

719 if (Opc == TargetOpcode::G_LOAD) {

720 I.removeOperand(1);

722 } else {

723

724 I.removeOperand(1);

725 I.removeOperand(0);

727 }

729 I.addImplicitDefUseOperands(MF);

730 return Constrained;

731}
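// getLeaOP returns the LEA opcode matching the pointer width (the 64-bit
// LEA64r case is visible here; the full source also covers the 32-bit and
// x32 variants). selectFrameIndexOrGep rewrites G_FRAME_INDEX / G_PTR_ADD
// into that LEA with the appropriate address operands.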

732

735 return X86::LEA64r;

738 else

740}

741

742bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,

743 MachineRegisterInfo &MRI,

744 MachineFunction &MF) const {

745 unsigned Opc = I.getOpcode();

746

747 assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_PTR_ADD) &&

748 "unexpected instruction");

749

750 const Register DefReg = I.getOperand(0).getReg();

751 LLT Ty = MRI.getType(DefReg);

752

753

754 unsigned NewOpc = getLeaOP(Ty, STI);

755 I.setDesc(TII.get(NewOpc));

756 MachineInstrBuilder MIB(MF, I);

757

758 if (Opc == TargetOpcode::G_FRAME_INDEX) {

760 } else {

761 MachineOperand &InxOp = I.getOperand(2);

762 I.addOperand(InxOp);

764 MIB.addImm(0).addReg(0);

765 }

766

768}

769

770bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,

771 MachineRegisterInfo &MRI,

772 MachineFunction &MF) const {

773 assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&

774 "unexpected instruction");

775

776 X86AddressMode AM;

778 return false;

779

780 const Register DefReg = I.getOperand(0).getReg();

781 LLT Ty = MRI.getType(DefReg);

782 unsigned NewOpc = getLeaOP(Ty, STI);

783

784 I.setDesc(TII.get(NewOpc));

785 MachineInstrBuilder MIB(MF, I);

786

787 I.removeOperand(1);

789

791}

792

793bool X86InstructionSelector::selectConstant(MachineInstr &I,

794 MachineRegisterInfo &MRI,

795 MachineFunction &MF) const {

796 assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&

797 "unexpected instruction");

798

799 const Register DefReg = I.getOperand(0).getReg();

800 LLT Ty = MRI.getType(DefReg);

801

803 return false;

804

805 uint64_t Val = 0;

806 if (I.getOperand(1).isCImm()) {

807 Val = I.getOperand(1).getCImm()->getZExtValue();

808 I.getOperand(1).ChangeToImmediate(Val);

809 } else if (I.getOperand(1).isImm()) {

810 Val = I.getOperand(1).getImm();

811 } else

813

814 unsigned NewOpc;

816 case 8:

817 NewOpc = X86::MOV8ri;

818 break;

819 case 16:

820 NewOpc = X86::MOV16ri;

821 break;

822 case 32:

823 NewOpc = X86::MOV32ri;

824 break;

825 case 64:

826

828 NewOpc = X86::MOV64ri32;

829 else

830 NewOpc = X86::MOV64ri;

831 break;

832 default:

833 llvm_unreachable("Can't select G_CONSTANT, unsupported type.");

834 }

835

836 I.setDesc(TII.get(NewOpc));

838}
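// canTurnIntoCOPY / selectTurnIntoCOPY: moves between the scalar FP classes
// (FR32/FR64 and their AVX-512 variants) and VR128 need no real instruction;
// once both registers are constrained the instruction becomes a plain COPY.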

839

840

841

842

845 return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||

846 DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&

847 (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);

848}

849

850bool X86InstructionSelector::selectTurnIntoCOPY(

851 MachineInstr &I, MachineRegisterInfo &MRI, const Register DstReg,

852 const TargetRegisterClass *DstRC, const Register SrcReg,

853 const TargetRegisterClass *SrcRC) const {

854

858 << " operand\n");

859 return false;

860 }

861 I.setDesc(TII.get(X86::COPY));

862 return true;

863}

864

865bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,

866 MachineRegisterInfo &MRI,

867 MachineFunction &MF) const {

868 assert((I.getOpcode() == TargetOpcode::G_TRUNC ||

869 I.getOpcode() == TargetOpcode::G_PTRTOINT) &&

870 "unexpected instruction");

871

872 const Register DstReg = I.getOperand(0).getReg();

873 const Register SrcReg = I.getOperand(1).getReg();

874

875 const LLT DstTy = MRI.getType(DstReg);

876 const LLT SrcTy = MRI.getType(SrcReg);

877

878 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);

879 const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

880

881 if (DstRB.getID() != SrcRB.getID()) {

883 << " input/output on different banks\n");

884 return false;

885 }

886

887 const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);

888 const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

889

890 if (!DstRC || !SrcRC)

891 return false;

892

893

894

895

897 return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);

898

899 if (DstRB.getID() != X86::GPRRegBankID)

900 return false;

901

902 unsigned SubIdx;

903 if (DstRC == SrcRC) {

904

905 SubIdx = X86::NoSubRegister;

906 } else if (DstRC == &X86::GR32RegClass) {

907 SubIdx = X86::sub_32bit;

908 } else if (DstRC == &X86::GR16RegClass) {

909 SubIdx = X86::sub_16bit;

910 } else if (DstRC == &X86::GR8RegClass) {

911 SubIdx = X86::sub_8bit;

912 } else {

913 return false;

914 }

915

916 SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

917

921 << "\n");

922 return false;

923 }

924

925 I.getOperand(1).setSubReg(SubIdx);

926

927 I.setDesc(TII.get(X86::COPY));

928 return true;

929}
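// selectZext: the common widening zexts are matched by tablegen patterns (see
// the asserts below); the remaining cases are lowered to an AND with the
// appropriate mask, first inserting the narrow source into a wider register
// via IMPLICIT_DEF + INSERT_SUBREG when the widths differ.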

930

931bool X86InstructionSelector::selectZext(MachineInstr &I,

932 MachineRegisterInfo &MRI,

933 MachineFunction &MF) const {

934 assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");

935

936 const Register DstReg = I.getOperand(0).getReg();

937 const Register SrcReg = I.getOperand(1).getReg();

938

939 const LLT DstTy = MRI.getType(DstReg);

940 const LLT SrcTy = MRI.getType(SrcReg);

941

943 "8=>16 Zext is handled by tablegen");

945 "8=>32 Zext is handled by tablegen");

947 "16=>32 Zext is handled by tablegen");

949 "8=>64 Zext is handled by tablegen");

951 "16=>64 Zext is handled by tablegen");

953 "32=>64 Zext is handled by tablegen");

954

956 return false;

957

958 unsigned AndOpc;

960 AndOpc = X86::AND8ri;

962 AndOpc = X86::AND16ri;

964 AndOpc = X86::AND32ri;

966 AndOpc = X86::AND64ri32;

967 else

968 return false;

969

974 BuildMI(*I.getParent(), I, I.getDebugLoc(),

975 TII.get(TargetOpcode::IMPLICIT_DEF), ImpDefReg);

976

977 DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));

978 BuildMI(*I.getParent(), I, I.getDebugLoc(),

979 TII.get(TargetOpcode::INSERT_SUBREG), DefReg)

982 .addImm(X86::sub_8bit);

983 }

984

985 MachineInstr &AndInst =

986 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)

989

991

992 I.eraseFromParent();

993 return true;

994}

995

996bool X86InstructionSelector::selectAnyext(MachineInstr &I,

997 MachineRegisterInfo &MRI,

998 MachineFunction &MF) const {

999 assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");

1000

1001 const Register DstReg = I.getOperand(0).getReg();

1002 const Register SrcReg = I.getOperand(1).getReg();

1003

1004 const LLT DstTy = MRI.getType(DstReg);

1005 const LLT SrcTy = MRI.getType(SrcReg);

1006

1007 const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);

1008 const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

1009

1011 "G_ANYEXT input/output on different banks\n");

1012

1014 "G_ANYEXT incorrect operand size");

1015

1016 const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);

1017 const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

1018

1019

1020

1021

1023 return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);

1024

1025 if (DstRB.getID() != X86::GPRRegBankID)

1026 return false;

1027

1031 << " operand\n");

1032 return false;

1033 }

1034

1035 if (SrcRC == DstRC) {

1036 I.setDesc(TII.get(X86::COPY));

1037 return true;

1038 }

1039

1040 BuildMI(*I.getParent(), I, I.getDebugLoc(),

1041 TII.get(TargetOpcode::SUBREG_TO_REG))

1045 .addImm(getSubRegIndex(SrcRC));

1046

1047 I.eraseFromParent();

1048 return true;

1049}
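// selectCmp / selectFCmp: integer compares become CMPxxrr + SETCCr. FP
// compares use UCOMISS/UCOMISD (or UCOM_FpIr* on the x87 bank) and either a
// single SETCCr, or for OEQ/UNE two SETCCr results combined by the third
// opcode in SETFOpcTable.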

1050

1051bool X86InstructionSelector::selectCmp(MachineInstr &I,

1052 MachineRegisterInfo &MRI,

1053 MachineFunction &MF) const {

1054 assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");

1055

1057 bool SwapArgs;

1060

1063

1064 if (SwapArgs)

1066

1067 unsigned OpCmp;

1068 LLT Ty = MRI.getType(LHS);

1069

1071 default:

1072 return false;

1073 case 8:

1074 OpCmp = X86::CMP8rr;

1075 break;

1076 case 16:

1077 OpCmp = X86::CMP16rr;

1078 break;

1079 case 32:

1080 OpCmp = X86::CMP32rr;

1081 break;

1082 case 64:

1083 OpCmp = X86::CMP64rr;

1084 break;

1085 }

1086

1087 MachineInstr &CmpInst =

1088 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))

1091

1092 MachineInstr &SetInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),

1093 TII.get(X86::SETCCr), I.getOperand(0).getReg()).addImm(CC);

1094

1097

1098 I.eraseFromParent();

1099 return true;

1100}

1101

1102bool X86InstructionSelector::selectFCmp(MachineInstr &I,

1103 MachineRegisterInfo &MRI,

1104 MachineFunction &MF) const {

1105 assert((I.getOpcode() == TargetOpcode::G_FCMP) && "unexpected instruction");

1106

1107 Register LhsReg = I.getOperand(2).getReg();

1108 Register RhsReg = I.getOperand(3).getReg();

1111

1112

1113 static const uint16_t SETFOpcTable[2][3] = {

1116 const uint16_t *SETFOpc = nullptr;

1117 switch (Predicate) {

1118 default:

1119 break;

1121 SETFOpc = &SETFOpcTable[0][0];

1122 break;

1124 SETFOpc = &SETFOpcTable[1][0];

1125 break;

1126 }

1127

1129 "Both arguments of FCMP need to be virtual!");

1131 [[maybe_unused]] auto *RhsBank = RBI.getRegBank(RhsReg, MRI, TRI);

1132 assert((LhsBank == RhsBank) &&

1133 "Both banks assigned to FCMP arguments need to be same!");

1134

1135

1136 unsigned OpCmp;

1137 LLT Ty = MRI.getType(LhsReg);

1139 default:

1140 return false;

1141 case 32:

1142 OpCmp = LhsBank->getID() == X86::PSRRegBankID ? X86::UCOM_FpIr32

1143 : X86::UCOMISSrr;

1144 break;

1145 case 64:

1146 OpCmp = LhsBank->getID() == X86::PSRRegBankID ? X86::UCOM_FpIr64

1147 : X86::UCOMISDrr;

1148 break;

1149 case 80:

1150 OpCmp = X86::UCOM_FpIr80;

1151 break;

1152 }

1153

1154 Register ResultReg = I.getOperand(0).getReg();

1156 ResultReg,

1158 if (SETFOpc) {

1159 MachineInstr &CmpInst =

1160 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))

1163

1164 Register FlagReg1 = MRI.createVirtualRegister(&X86::GR8RegClass);

1165 Register FlagReg2 = MRI.createVirtualRegister(&X86::GR8RegClass);

1166 MachineInstr &Set1 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),

1167 TII.get(X86::SETCCr), FlagReg1).addImm(SETFOpc[0]);

1168 MachineInstr &Set2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),

1169 TII.get(X86::SETCCr), FlagReg2).addImm(SETFOpc[1]);

1170 MachineInstr &Set3 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),

1171 TII.get(SETFOpc[2]), ResultReg)

1178

1179 I.eraseFromParent();

1180 return true;

1181 }

1182

1184 bool SwapArgs;

1187

1188 if (SwapArgs)

1190

1191

1192 MachineInstr &CmpInst =

1193 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))

1196

1197 MachineInstr &Set =

1198 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr), ResultReg).addImm(CC);

1201 I.eraseFromParent();

1202 return true;

1203}
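// selectUAddSub: lower G_UADDO/G_UADDE/G_USUBO/G_USUBE to ADD/ADC/SUB/SBB. A
// carry-in defined by another of these ops is re-materialized into EFLAGS
// with CMP8ri and consumed by ADC/SBB; a constant-zero carry-in degrades to
// plain ADD/SUB; anything else fails. The carry-out is captured with SETCCr.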

1204

1205bool X86InstructionSelector::selectUAddSub(MachineInstr &I,

1206 MachineRegisterInfo &MRI,

1207 MachineFunction &MF) const {

1208 assert((I.getOpcode() == TargetOpcode::G_UADDE ||

1209 I.getOpcode() == TargetOpcode::G_UADDO ||

1210 I.getOpcode() == TargetOpcode::G_USUBE ||

1211 I.getOpcode() == TargetOpcode::G_USUBO) &&

1212 "unexpected instruction");

1213

1215

1216 const Register DstReg = CarryMI.getDstReg();

1217 const Register CarryOutReg = CarryMI.getCarryOutReg();

1218 const Register Op0Reg = CarryMI.getLHSReg();

1219 const Register Op1Reg = CarryMI.getRHSReg();

1220 bool IsSub = CarryMI.isSub();

1221

1222 const LLT DstTy = MRI.getType(DstReg);

1223 assert(DstTy.isScalar() && "selectUAddSub only supported for scalar types");

1224

1225

1226 unsigned OpADC, OpADD, OpSBB, OpSUB;

1228 case 8:

1229 OpADC = X86::ADC8rr;

1230 OpADD = X86::ADD8rr;

1231 OpSBB = X86::SBB8rr;

1232 OpSUB = X86::SUB8rr;

1233 break;

1234 case 16:

1235 OpADC = X86::ADC16rr;

1236 OpADD = X86::ADD16rr;

1237 OpSBB = X86::SBB16rr;

1238 OpSUB = X86::SUB16rr;

1239 break;

1240 case 32:

1241 OpADC = X86::ADC32rr;

1242 OpADD = X86::ADD32rr;

1243 OpSBB = X86::SBB32rr;

1244 OpSUB = X86::SUB32rr;

1245 break;

1246 case 64:

1247 OpADC = X86::ADC64rr;

1248 OpADD = X86::ADD64rr;

1249 OpSBB = X86::SBB64rr;

1250 OpSUB = X86::SUB64rr;

1251 break;

1252 default:

1254 }

1255

1256 const RegisterBank &CarryRB = *RBI.getRegBank(CarryOutReg, MRI, TRI);

1257 const TargetRegisterClass *CarryRC =

1259

1260 unsigned Opcode = IsSub ? OpSUB : OpADD;

1261

1262

1264 Register CarryInReg = CarryInMI->getCarryInReg();

1265 MachineInstr *Def = MRI.getVRegDef(CarryInReg);

1266 while (Def->getOpcode() == TargetOpcode::G_TRUNC) {

1267 CarryInReg = Def->getOperand(1).getReg();

1268 Def = MRI.getVRegDef(CarryInReg);

1269 }

1270

1271

1272 if (Def->getOpcode() == TargetOpcode::G_UADDE ||

1273 Def->getOpcode() == TargetOpcode::G_UADDO ||

1274 Def->getOpcode() == TargetOpcode::G_USUBE ||

1275 Def->getOpcode() == TargetOpcode::G_USUBO) {

1276

1277

1278 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::CMP8ri))

1281

1283 return false;

1284

1285 Opcode = IsSub ? OpSBB : OpADC;

1287

1288 if (*val != 0)

1289 return false;

1290

1291 Opcode = IsSub ? OpSUB : OpADD;

1292 } else

1293 return false;

1294 }

1295

1296 MachineInstr &Inst =

1297 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)

1300

1301 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr), CarryOutReg)

1303

1306 return false;

1307

1308 I.eraseFromParent();

1309 return true;

1310}
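// selectExtract/emitExtractSubreg and selectInsert/emitInsertSubreg handle
// 128- and 256-bit subvector extraction/insertion: index 0 becomes a plain
// subregister copy (sub_xmm/sub_ymm); other indices use the VEXTRACTF*/
// VINSERTF* instructions available on the subtarget.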

1311

1312bool X86InstructionSelector::selectExtract(MachineInstr &I,

1313 MachineRegisterInfo &MRI,

1314 MachineFunction &MF) const {

1315 assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&

1316 "unexpected instruction");

1317

1318 const Register DstReg = I.getOperand(0).getReg();

1319 const Register SrcReg = I.getOperand(1).getReg();

1320 int64_t Index = I.getOperand(2).getImm();

1321

1322 const LLT DstTy = MRI.getType(DstReg);

1323 const LLT SrcTy = MRI.getType(SrcReg);

1324

1325

1327 return false;

1328

1330 return false;

1331

1332 if (Index == 0) {

1333

1334 if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))

1335 return false;

1336

1337 I.eraseFromParent();

1338 return true;

1339 }

1340

1341 bool HasAVX = STI.hasAVX();

1342 bool HasAVX512 = STI.hasAVX512();

1343 bool HasVLX = STI.hasVLX();

1344

1346 if (HasVLX)

1347 I.setDesc(TII.get(X86::VEXTRACTF32X4Z256rri));

1348 else if (HasAVX)

1349 I.setDesc(TII.get(X86::VEXTRACTF128rri));

1350 else

1351 return false;

1352 } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {

1354 I.setDesc(TII.get(X86::VEXTRACTF32X4Zrri));

1356 I.setDesc(TII.get(X86::VEXTRACTF64X4Zrri));

1357 else

1358 return false;

1359 } else

1360 return false;

1361

1362

1364 I.getOperand(2).setImm(Index);

1365

1367}

1368

1369bool X86InstructionSelector::emitExtractSubreg(Register DstReg, Register SrcReg,

1370 MachineInstr &I,

1371 MachineRegisterInfo &MRI,

1372 MachineFunction &MF) const {

1373 const LLT DstTy = MRI.getType(DstReg);

1374 const LLT SrcTy = MRI.getType(SrcReg);

1375 unsigned SubIdx = X86::NoSubRegister;

1376

1378 return false;

1379

1381 "Incorrect Src/Dst register size");

1382

1384 SubIdx = X86::sub_xmm;

1386 SubIdx = X86::sub_ymm;

1387 else

1388 return false;

1389

1390 const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);

1391 const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);

1392

1393 SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

1394

1397 LLVM_DEBUG(dbgs() << "Failed to constrain EXTRACT_SUBREG\n");

1398 return false;

1399 }

1400

1401 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)

1402 .addReg(SrcReg, 0, SubIdx);

1403

1404 return true;

1405}

1406

1407bool X86InstructionSelector::emitInsertSubreg(Register DstReg, Register SrcReg,

1408 MachineInstr &I,

1409 MachineRegisterInfo &MRI,

1410 MachineFunction &MF) const {

1411 const LLT DstTy = MRI.getType(DstReg);

1412 const LLT SrcTy = MRI.getType(SrcReg);

1413 unsigned SubIdx = X86::NoSubRegister;

1414

1415

1417 return false;

1418

1420 "Incorrect Src/Dst register size");

1421

1423 SubIdx = X86::sub_xmm;

1425 SubIdx = X86::sub_ymm;

1426 else

1427 return false;

1428

1429 const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);

1430 const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);

1431

1434 LLVM_DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");

1435 return false;

1436 }

1437

1438 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))

1441

1442 return true;

1443}

1444

1445bool X86InstructionSelector::selectInsert(MachineInstr &I,

1446 MachineRegisterInfo &MRI,

1447 MachineFunction &MF) const {

1448 assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");

1449

1450 const Register DstReg = I.getOperand(0).getReg();

1451 const Register SrcReg = I.getOperand(1).getReg();

1452 const Register InsertReg = I.getOperand(2).getReg();

1453 int64_t Index = I.getOperand(3).getImm();

1454

1455 const LLT DstTy = MRI.getType(DstReg);

1456 const LLT InsertRegTy = MRI.getType(InsertReg);

1457

1458

1460 return false;

1461

1463 return false;

1464

1465 if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {

1466

1467 if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))

1468 return false;

1469

1470 I.eraseFromParent();

1471 return true;

1472 }

1473

1474 bool HasAVX = STI.hasAVX();

1475 bool HasAVX512 = STI.hasAVX512();

1476 bool HasVLX = STI.hasVLX();

1477

1479 if (HasVLX)

1480 I.setDesc(TII.get(X86::VINSERTF32X4Z256rri));

1481 else if (HasAVX)

1482 I.setDesc(TII.get(X86::VINSERTF128rri));

1483 else

1484 return false;

1485 } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {

1487 I.setDesc(TII.get(X86::VINSERTF32X4Zrri));

1489 I.setDesc(TII.get(X86::VINSERTF64X4Zrri));

1490 else

1491 return false;

1492 } else

1493 return false;

1494

1495

1497

1498 I.getOperand(3).setImm(Index);

1499

1501}
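// selectUnmergeValues / selectMergeValues: decompose into a chain of
// G_EXTRACT / G_INSERT instructions and recursively select those.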

1502

1503bool X86InstructionSelector::selectUnmergeValues(

1504 MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {

1505 assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&

1506 "unexpected instruction");

1507

1508

1509 unsigned NumDefs = I.getNumOperands() - 1;

1510 Register SrcReg = I.getOperand(NumDefs).getReg();

1511 unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

1512

1513 for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {

1514 MachineInstr &ExtrInst =

1515 *BuildMI(*I.getParent(), I, I.getDebugLoc(),

1516 TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())

1518 .addImm(Idx * DefSize);

1519

1520 if (!select(ExtrInst))

1521 return false;

1522 }

1523

1524 I.eraseFromParent();

1525 return true;

1526}

1527

1528bool X86InstructionSelector::selectMergeValues(

1529 MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {

1530 assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES ||

1531 I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS) &&

1532 "unexpected instruction");

1533

1534

1535 Register DstReg = I.getOperand(0).getReg();

1536 Register SrcReg0 = I.getOperand(1).getReg();

1537

1538 const LLT DstTy = MRI.getType(DstReg);

1539 const LLT SrcTy = MRI.getType(SrcReg0);

1541

1542 const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);

1543

1544

1545 Register DefReg = MRI.createGenericVirtualRegister(DstTy);

1546 MRI.setRegBank(DefReg, RegBank);

1547 if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))

1548 return false;

1549

1550 for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {

1551 Register Tmp = MRI.createGenericVirtualRegister(DstTy);

1552 MRI.setRegBank(Tmp, RegBank);

1553

1554 MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),

1555 TII.get(TargetOpcode::G_INSERT), Tmp)

1557 .addReg(I.getOperand(Idx).getReg())

1558 .addImm((Idx - 1) * SrcSize);

1559

1560 DefReg = Tmp;

1561

1562 if (!select(InsertInst))

1563 return false;

1564 }

1565

1566 MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),

1567 TII.get(TargetOpcode::COPY), DstReg)

1569

1570 if (!select(CopyInst))

1571 return false;

1572

1573 I.eraseFromParent();

1574 return true;

1575}
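// selectCondBranch: G_BRCOND becomes a TEST8ri of the condition register
// followed by a JCC_1 to the destination block.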

1576

1577bool X86InstructionSelector::selectCondBranch(MachineInstr &I,

1578 MachineRegisterInfo &MRI,

1579 MachineFunction &MF) const {

1580 assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");

1581

1582 const Register CondReg = I.getOperand(0).getReg();

1583 MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

1584

1585 MachineInstr &TestInst =

1586 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))

1589 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JCC_1))

1591

1593

1594 I.eraseFromParent();

1595 return true;

1596}
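// materializeFP: G_FCONSTANT is materialized as a constant-pool load. One
// path first loads the pool address into a register with MOV64ri (used for
// the large code model in the full source); the other uses
// addConstantPoolReference, with RIP as the PIC base when the subtarget is
// RIP-relative.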

1597

1598bool X86InstructionSelector::materializeFP(MachineInstr &I,

1599 MachineRegisterInfo &MRI,

1600 MachineFunction &MF) const {

1601 assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&

1602 "unexpected instruction");

1603

1604

1607 return false;

1608

1609 const Register DstReg = I.getOperand(0).getReg();

1610 const LLT DstTy = MRI.getType(DstReg);

1611 const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);

1612

1613 const ConstantFP *CFP = I.getOperand(1).getFPImm();

1615 Align Alignment = DL.getPrefTypeAlign(CFP->getType());

1616 const DebugLoc &DbgLoc = I.getDebugLoc();

1617

1618 unsigned Opc =

1619 getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Alignment);

1620

1622 MachineInstr *LoadInst = nullptr;

1624

1626

1627

1628

1629 Register AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);

1630 BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)

1632

1635 LLT::pointer(0, DL.getPointerSizeInBits()), Alignment);

1636

1637 LoadInst =

1639 AddrReg)

1641

1643

1644

1645

1646

1647 unsigned PICBase = 0;

1649

1650

1651 return false;

1653 PICBase = X86::RIP;

1654

1656 BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,

1657 OpFlag);

1658 } else

1659 return false;

1660

1662 I.eraseFromParent();

1663 return true;

1664}
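// selectImplicitDefOrPHI: constrain the destination register if it has no
// class yet, then simply retarget the opcode to IMPLICIT_DEF or PHI.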

1665

1666bool X86InstructionSelector::selectImplicitDefOrPHI(

1667 MachineInstr &I, MachineRegisterInfo &MRI) const {

1668 assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||

1669 I.getOpcode() == TargetOpcode::G_PHI) &&

1670 "unexpected instruction");

1671

1672 Register DstReg = I.getOperand(0).getReg();

1673

1674 if (MRI.getRegClassOrNull(DstReg)) {

1675 const LLT DstTy = MRI.getType(DstReg);

1676 const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);

1677

1680 << " operand\n");

1681 return false;

1682 }

1683 }

1684

1685 if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)

1686 I.setDesc(TII.get(X86::IMPLICIT_DEF));

1687 else

1688 I.setDesc(TII.get(X86::PHI));

1689

1690 return true;

1691}
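// selectMulDivRem: these opcodes all use the fixed-register x86 forms. The
// OpTable below maps each bit width to the DIV/IDIV/MUL/IMUL opcode, the way
// the high half is set up (CWD/CDQ/CQO or a zeroed register), and the
// implicit result register (AL/AH, AX/DX, EAX/EDX, RAX/RDX). An AH result on
// 64-bit targets is instead extracted via SHR16ri and a sub_8bit copy, since
// AH is not encodable together with a REX prefix.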

1692

1693bool X86InstructionSelector::selectMulDivRem(MachineInstr &I,

1694 MachineRegisterInfo &MRI,

1695 MachineFunction &MF) const {

1696

1697 assert((I.getOpcode() == TargetOpcode::G_MUL ||

1698 I.getOpcode() == TargetOpcode::G_SMULH ||

1699 I.getOpcode() == TargetOpcode::G_UMULH ||

1700 I.getOpcode() == TargetOpcode::G_SDIV ||

1701 I.getOpcode() == TargetOpcode::G_SREM ||

1702 I.getOpcode() == TargetOpcode::G_UDIV ||

1703 I.getOpcode() == TargetOpcode::G_UREM) &&

1704 "unexpected instruction");

1705

1706 const Register DstReg = I.getOperand(0).getReg();

1707 const Register Op1Reg = I.getOperand(1).getReg();

1708 const Register Op2Reg = I.getOperand(2).getReg();

1709

1710 const LLT RegTy = MRI.getType(DstReg);

1711 assert(RegTy == MRI.getType(Op1Reg) && RegTy == MRI.getType(Op2Reg) &&

1712 "Arguments and return value types must match");

1713

1714 const RegisterBank *RegRB = RBI.getRegBank(DstReg, MRI, TRI);

1715 if (!RegRB || RegRB->getID() != X86::GPRRegBankID)

1716 return false;

1717

1718 const static unsigned NumTypes = 4;

1719 const static unsigned NumOps = 7;

1720 const static bool S = true;

1721 const static bool U = false;

1722 const static unsigned Copy = TargetOpcode::COPY;

1723

1724

1725

1726

1727

1728

1729

1730

1731

1732 const static struct MulDivRemEntry {

1733

1734 unsigned SizeInBits;

1735 unsigned LowInReg;

1736 unsigned HighInReg;

1737

1738 struct MulDivRemResult {

1739 unsigned OpMulDivRem;

1740 unsigned OpSignExtend;

1741

1742 unsigned OpCopy;

1743

1744 unsigned ResultReg;

1745 bool IsOpSigned;

1746 } ResultTable[NumOps];

1747 } OpTable[NumTypes] = {

1748 {8,

1749 X86::AX,

1750 0,

1751 {

1752 {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S},

1753 {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S},

1754 {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U},

1755 {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U},

1756 {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AL, S},

1757 {X86::IMUL8r, 0, X86::MOVSX16rr8, X86::AH, S},

1758 {X86::MUL8r, 0, X86::MOVZX16rr8, X86::AH, U},

1759 }},

1760 {16,

1761 X86::AX,

1762 X86::DX,

1763 {

1764 {X86::IDIV16r, X86::CWD, Copy, X86::AX, S},

1765 {X86::IDIV16r, X86::CWD, Copy, X86::DX, S},

1766 {X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U},

1767 {X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U},

1768 {X86::IMUL16r, X86::MOV32r0, Copy, X86::AX, S},

1769 {X86::IMUL16r, X86::MOV32r0, Copy, X86::DX, S},

1770 {X86::MUL16r, X86::MOV32r0, Copy, X86::DX, U},

1771 }},

1772 {32,

1773 X86::EAX,

1774 X86::EDX,

1775 {

1776 {X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S},

1777 {X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S},

1778 {X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U},

1779 {X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U},

1780 {X86::IMUL32r, X86::MOV32r0, Copy, X86::EAX, S},

1781 {X86::IMUL32r, X86::MOV32r0, Copy, X86::EDX, S},

1782 {X86::MUL32r, X86::MOV32r0, Copy, X86::EDX, U},

1783 }},

1784 {64,

1785 X86::RAX,

1786 X86::RDX,

1787 {

1788 {X86::IDIV64r, X86::CQO, Copy, X86::RAX, S},

1789 {X86::IDIV64r, X86::CQO, Copy, X86::RDX, S},

1790 {X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U},

1791 {X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U},

1792 {X86::IMUL64r, X86::MOV32r0, Copy, X86::RAX, S},

1793 {X86::IMUL64r, X86::MOV32r0, Copy, X86::RDX, S},

1794 {X86::MUL64r, X86::MOV32r0, Copy, X86::RDX, U},

1795 }},

1796 };

1797

1798 auto OpEntryIt = llvm::find_if(OpTable, [RegTy](const MulDivRemEntry &El) {

1800 });

1801 if (OpEntryIt == std::end(OpTable))

1802 return false;

1803

1805 switch (I.getOpcode()) {

1806 default:

1808 case TargetOpcode::G_SDIV:

1810 break;

1811 case TargetOpcode::G_SREM:

1813 break;

1814 case TargetOpcode::G_UDIV:

1816 break;

1817 case TargetOpcode::G_UREM:

1819 break;

1820 case TargetOpcode::G_MUL:

1822 break;

1823 case TargetOpcode::G_SMULH:

1825 break;

1826 case TargetOpcode::G_UMULH:

1828 break;

1829 }

1830

1831 const MulDivRemEntry &TypeEntry = *OpEntryIt;

1832 const MulDivRemEntry::MulDivRemResult &OpEntry =

1834

1835 const TargetRegisterClass *RegRC = getRegClass(RegTy, *RegRB);

1840 << " operand\n");

1841 return false;

1842 }

1843

1844

1845 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpCopy),

1848

1849

1850 if (OpEntry.OpSignExtend) {

1851 if (OpEntry.IsOpSigned)

1852 BuildMI(*I.getParent(), I, I.getDebugLoc(),

1853 TII.get(OpEntry.OpSignExtend));

1854 else {

1855 Register Zero32 = MRI.createVirtualRegister(&X86::GR32RegClass);

1856 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::MOV32r0),

1857 Zero32);

1858

1859

1860

1861

1865 .addReg(Zero32, 0, X86::sub_16bit);

1871 BuildMI(*I.getParent(), I, I.getDebugLoc(),

1872 TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)

1875 .addImm(X86::sub_32bit);

1876 }

1877 }

1878 }

1879

1880

1881 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpMulDivRem))

1883

1884

1885

1886

1887

1888

1889

1890

1891

1892 if (OpEntry.ResultReg == X86::AH && STI.is64Bit()) {

1893 Register SourceSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);

1894 Register ResultSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);

1895 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy), SourceSuperReg)

1897

1898

1899 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SHR16ri),

1900 ResultSuperReg)

1901 .addReg(SourceSuperReg)

1903

1904

1905 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),

1906 DstReg)

1907 .addReg(ResultSuperReg, 0, X86::sub_8bit);

1908 } else {

1909 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),

1910 DstReg)

1911 .addReg(OpEntry.ResultReg);

1912 }

1913 I.eraseFromParent();

1914

1915 return true;

1916}
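// selectSelect: G_SELECT is lowered to a test of the condition register
// followed by CMOVxxrr, or the CMOV_GRxx pseudo when the subtarget cannot
// use CMOV (8-bit selects always use the pseudo).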

1917

1918bool X86InstructionSelector::selectSelect(MachineInstr &I,

1919 MachineRegisterInfo &MRI,

1920 MachineFunction &MF) const {

1926

1927 unsigned OpCmp;

1928 LLT Ty = MRI.getType(DstReg);

1931 DstReg)

1934 } else {

1936 default:

1937 return false;

1938 case 8:

1939 OpCmp = X86::CMOV_GR8;

1940 break;

1941 case 16:

1942 OpCmp = STI.canUseCMOV() ? X86::CMOV16rr : X86::CMOV_GR16;

1943 break;

1944 case 32:

1945 OpCmp = STI.canUseCMOV() ? X86::CMOV32rr : X86::CMOV_GR32;

1946 break;

1947 case 64:

1949 OpCmp = X86::CMOV64rr;

1950 break;

1951 }

1956 }

1957 const TargetRegisterClass *DstRC = getRegClass(Ty, DstReg, MRI);

1960 return false;

1961 }

1962

1964 return true;

1965}
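// selectAddr: complex operand renderer used by the tablegen patterns. It
// folds the address-defining instruction through X86SelectAddress and renders
// the five x86 memory operands: base, scale, index, displacement (or
// global/constant-pool reference) and segment.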

1966

1967InstructionSelector::ComplexRendererFns

1968X86InstructionSelector::selectAddr(MachineOperand &Root) const {

1970 MachineIRBuilder MIRBuilder(*MI);

1971

1972 MachineRegisterInfo &MRI = MI->getMF()->getRegInfo();

1973 MachineInstr *Ptr = MRI.getVRegDef(Root.getReg());

1974 X86AddressMode AM;

1976

1978 return std::nullopt;

1979

1980 return {

1981 {[=](MachineInstrBuilder &MIB) {

1983 MIB.addUse(AM.Base.Reg);

1984 else {

1986 "Unknown type of address base");

1988 }

1989 },

1990

1991 [=](MachineInstrBuilder &MIB) { MIB.addImm(AM.Scale); },

1992

1993 [=](MachineInstrBuilder &MIB) { MIB.addUse(0); },

1994

1995 [=](MachineInstrBuilder &MIB) {

1996 if (AM.GV)

1998 else if (AM.CP)

1999 MIB.addConstantPoolIndex(AM.Disp, 0, AM.GVOpFlags);

2000 else

2001 MIB.addImm(AM.Disp);

2002 },

2003

2004 [=](MachineInstrBuilder &MIB) { MIB.addUse(0); }}};

2005}

2006

2007InstructionSelector *

2011 return new X86InstructionSelector(TM, Subtarget, RBI);

2012}
