LLVM: lib/Target/ARM/ARMFastISel.cpp Source File (original) (raw)

1

2

3

4

5

6

7

8

9

10

11

12

13

14

78#include

79#include

80#include

81

82using namespace llvm;

83

84namespace {

85

86

87class Address {

88public:

89 enum BaseKind { RegBase, FrameIndexBase };

90

91private:

92 BaseKind Kind = RegBase;

93 union {

94 unsigned Reg;

95 int FI;

96 } Base;

97

98 int Offset = 0;

99

100public:

101

102 Address() { Base.Reg = 0; }

103

104 void setKind(BaseKind K) { Kind = K; }

105 BaseKind getKind() const { return Kind; }

106 bool isRegBase() const { return Kind == RegBase; }

107 bool isFIBase() const { return Kind == FrameIndexBase; }

108

110 assert(isRegBase() && "Invalid base register access!");

111 Base.Reg = Reg.id();

112 }

113

115 assert(isRegBase() && "Invalid base register access!");

116 return Base.Reg;

117 }

118

119 void setFI(int FI) {

120 assert(isFIBase() && "Invalid base frame index access!");

121 Base.FI = FI;

122 }

123

124 int getFI() const {

125 assert(isFIBase() && "Invalid base frame index access!");

126 return Base.FI;

127 }

128

129 void setOffset(int O) { Offset = O; }

130 int getOffset() { return Offset; }

131};

132

133class ARMFastISel final : public FastISel {

134

135

136 const ARMSubtarget *Subtarget;

138 const ARMBaseInstrInfo &TII;

139 const ARMTargetLowering &TLI;

140 const ARMBaseTargetMachine &TM;

141 ARMFunctionInfo *AFI;

142

143

144 bool isThumb2;

145 LLVMContext *Context;

146

147 public:

148 explicit ARMFastISel(FunctionLoweringInfo &funcInfo,

149 const TargetLibraryInfo *libInfo)

150 : FastISel(funcInfo, libInfo),

151 Subtarget(&funcInfo.MF->getSubtarget()),

153 TII(*Subtarget->getInstrInfo()), TLI(*Subtarget->getTargetLowering()),

154 TM(TLI.getTM()) {

155 AFI = funcInfo.MF->getInfo();

156 isThumb2 = AFI->isThumbFunction();

158 }

159

160 private:

161

162

163 Register fastEmitInst_r(unsigned MachineInstOpcode,

164 const TargetRegisterClass *RC, Register Op0);

165 Register fastEmitInst_rr(unsigned MachineInstOpcode,

166 const TargetRegisterClass *RC, Register Op0,

168 Register fastEmitInst_ri(unsigned MachineInstOpcode,

169 const TargetRegisterClass *RC, Register Op0,

170 uint64_t Imm);

171 Register fastEmitInst_i(unsigned MachineInstOpcode,

172 const TargetRegisterClass *RC, uint64_t Imm);

173

174

175

176 bool fastSelectInstruction(const Instruction *I) override;

177 Register fastMaterializeConstant(const Constant *C) override;

178 Register fastMaterializeAlloca(const AllocaInst *AI) override;

179 bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,

180 const LoadInst *LI) override;

181 bool fastLowerArguments() override;

182

183#include "ARMGenFastISel.inc"

184

185

186

187 bool SelectLoad(const Instruction *I);

188 bool SelectStore(const Instruction *I);

189 bool SelectBranch(const Instruction *I);

190 bool SelectIndirectBr(const Instruction *I);

191 bool SelectCmp(const Instruction *I);

192 bool SelectFPExt(const Instruction *I);

193 bool SelectFPTrunc(const Instruction *I);

194 bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);

195 bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);

196 bool SelectIToFP(const Instruction *I, bool isSigned);

197 bool SelectFPToI(const Instruction *I, bool isSigned);

198 bool SelectDiv(const Instruction *I, bool isSigned);

199 bool SelectRem(const Instruction *I, bool isSigned);

200 bool SelectCall(const Instruction *I, const char *IntrMemName);

201 bool SelectIntrinsicCall(const IntrinsicInst &I);

202 bool SelectSelect(const Instruction *I);

203 bool SelectRet(const Instruction *I);

204 bool SelectTrunc(const Instruction *I);

205 bool SelectIntExt(const Instruction *I);

206 bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy);

207

208

209

210 bool isPositionIndependent() const;

211 bool isTypeLegal(Type *Ty, MVT &VT);

212 bool isLoadTypeLegal(Type *Ty, MVT &VT);

213 bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,

214 bool isZExt);

215 bool ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr,

216 MaybeAlign Alignment = std::nullopt, bool isZExt = true,

217 bool allocReg = true);

218 bool ARMEmitStore(MVT VT, Register SrcReg, Address &Addr,

219 MaybeAlign Alignment = std::nullopt);

220 bool ARMComputeAddress(const Value *Obj, Address &Addr);

221 void ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3);

222 bool ARMIsMemCpySmall(uint64_t Len);

223 bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,

224 MaybeAlign Alignment);

225 Register ARMEmitIntExt(MVT SrcVT, Register SrcReg, MVT DestVT, bool isZExt);

226 Register ARMMaterializeFP(const ConstantFP *CFP, MVT VT);

227 Register ARMMaterializeInt(const Constant *C, MVT VT);

228 Register ARMMaterializeGV(const GlobalValue *GV, MVT VT);

231 unsigned ARMSelectCallOp(bool UseReg);

232 Register ARMLowerPICELF(const GlobalValue *GV, MVT VT);

233

234 const TargetLowering *getTargetLowering() { return &TLI; }

235

236

237

238 CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,

239 bool Return,

240 bool isVarArg);

241 bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,

242 SmallVectorImpl &ArgRegs,

243 SmallVectorImpl &ArgVTs,

244 SmallVectorImplISD::ArgFlagsTy &ArgFlags,

245 SmallVectorImpl &RegArgs,

246 CallingConv::ID CC,

247 unsigned &NumBytes,

248 bool isVarArg);

249 Register getLibcallReg(const Twine &Name);

250 bool FinishCall(MVT RetVT, SmallVectorImpl &UsedRegs,

251 const Instruction *I, CallingConv::ID CC,

252 unsigned &NumBytes, bool isVarArg);

253 bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

254

255

256

257 bool isARMNEONPred(const MachineInstr *MI);

258 bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);

259 const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);

260 void AddLoadStoreOperands(MVT VT, Address &Addr,

261 const MachineInstrBuilder &MIB,

263};

264

265}

266

267

268

269

270bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {

271 if (MI->hasOptionalDef())

272 return false;

273

274

275 for (const MachineOperand &MO : MI->operands()) {

276 if (!MO.isReg() || !MO.isDef()) continue;

277 if (MO.getReg() == ARM::CPSR)

278 *CPSR = true;

279 }

280 return true;

281}

282

283bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {

284 const MCInstrDesc &MCID = MI->getDesc();

285

286

289 return MI->isPredicable();

290

291 for (const MCOperandInfo &opInfo : MCID.operands())

292 if (opInfo.isPredicate())

293 return true;

294

295 return false;

296}

297

298

299

300

301

302

303const MachineInstrBuilder &

304ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {

305 MachineInstr *MI = &*MIB;

306

307

308

309

310 if (isARMNEONPred(MI))

312

313

314

315 bool CPSR = false;

316 if (DefinesOptionalPredicate(MI, &CPSR))

318 return MIB;

319}

320

321Register ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,

322 const TargetRegisterClass *RC,

324 Register ResultReg = createResultReg(RC);

325 const MCInstrDesc &II = TII.get(MachineInstOpcode);

326

327

328

330 if (II.getNumDefs() >= 1) {

331 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II,

332 ResultReg).addReg(Op0));

333 } else {

334 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)

336 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

337 TII.get(TargetOpcode::COPY), ResultReg)

338 .addReg(II.implicit_defs()[0]));

339 }

340 return ResultReg;

341}

342

343Register ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,

344 const TargetRegisterClass *RC,

346 Register ResultReg = createResultReg(RC);

347 const MCInstrDesc &II = TII.get(MachineInstOpcode);

348

349

350

353

354 if (II.getNumDefs() >= 1) {

355 AddOptionalDefs(

356 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)

359 } else {

360 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)

363 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

364 TII.get(TargetOpcode::COPY), ResultReg)

365 .addReg(II.implicit_defs()[0]));

366 }

367 return ResultReg;

368}

369

370Register ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,

371 const TargetRegisterClass *RC,

372 Register Op0, uint64_t Imm) {

373 Register ResultReg = createResultReg(RC);

374 const MCInstrDesc &II = TII.get(MachineInstOpcode);

375

376

377

379 if (II.getNumDefs() >= 1) {

380 AddOptionalDefs(

381 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)

384 } else {

385 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)

388 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

389 TII.get(TargetOpcode::COPY), ResultReg)

390 .addReg(II.implicit_defs()[0]));

391 }

392 return ResultReg;

393}

394

395Register ARMFastISel::fastEmitInst_i(unsigned MachineInstOpcode,

396 const TargetRegisterClass *RC,

397 uint64_t Imm) {

398 Register ResultReg = createResultReg(RC);

399 const MCInstrDesc &II = TII.get(MachineInstOpcode);

400

401 if (II.getNumDefs() >= 1) {

402 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II,

403 ResultReg).addImm(Imm));

404 } else {

405 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)

407 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

408 TII.get(TargetOpcode::COPY), ResultReg)

409 .addReg(II.implicit_defs()[0]));

410 }

411 return ResultReg;

412}

413

414

415

416Register ARMFastISel::ARMMoveToFPReg(MVT VT, Register SrcReg) {

417 if (VT == MVT::f64)

419

421 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

422 TII.get(ARM::VMOVSR), MoveReg)

425}

426

427Register ARMFastISel::ARMMoveToIntReg(MVT VT, Register SrcReg) {

428 if (VT == MVT::i64)

430

432 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

433 TII.get(ARM::VMOVRS), MoveReg)

436}

437

438

439

440

441Register ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {

443 bool is64bit = VT == MVT::f64;

444

445

446

449 unsigned Opc;

450 if (is64bit) {

452 Opc = ARM::FCONSTD;

453 } else {

455 Opc = ARM::FCONSTS;

456 }

458 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

460 return DestReg;

461 }

462

463

464 if (!Subtarget->hasVFP2Base()) return false;

465

466

467 Align Alignment = DL.getPrefTypeAlign(CFP->getType());

468 unsigned Idx = MCP.getConstantPoolIndex(cast(CFP), Alignment);

470 unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

471

472

473 AddOptionalDefs(

474 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg)

477 return DestReg;

478}

479

480Register ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {

481 if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)

483

484

485

488 unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;

489 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :

490 &ARM::GPRRegClass;

491 Register ImmReg = createResultReg(RC);

492 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

495 return ImmReg;

496 }

497

498

499 if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {

503 if (UseImm) {

504 unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;

505 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :

506 &ARM::GPRRegClass;

507 Register ImmReg = createResultReg(RC);

508 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

511 return ImmReg;

512 }

513 }

514

516 if (Subtarget->useMovt())

518

519 if (ResultReg)

520 return ResultReg;

521

522

523 if (VT != MVT::i32)

525

526

527 Align Alignment = DL.getPrefTypeAlign(C->getType());

528 unsigned Idx = MCP.getConstantPoolIndex(C, Alignment);

530 if (isThumb2)

531 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

532 TII.get(ARM::t2LDRpci), ResultReg)

534 else {

535

537 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

538 TII.get(ARM::LDRcp), ResultReg)

541 }

542 return ResultReg;

543}

544

545bool ARMFastISel::isPositionIndependent() const {

547}

548

549Register ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {

550

553

554

555 if (Subtarget->isROPI() || Subtarget->isRWPI())

557

558 bool IsIndirect = Subtarget->isGVIndirectSymbol(GV);

559 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass

560 : &ARM::GPRRegClass;

561 Register DestReg = createResultReg(RC);

562

563

565 bool IsThreadLocal = GVar && GVar->isThreadLocal();

566 if (!Subtarget->isTargetMachO() && IsThreadLocal)

568

569 bool IsPositionIndependent = isPositionIndependent();

570

571

572 if (Subtarget->useMovt() &&

573 (Subtarget->isTargetMachO() || !IsPositionIndependent)) {

574 unsigned Opc;

575 unsigned char TF = 0;

576 if (Subtarget->isTargetMachO())

578

579 if (IsPositionIndependent)

580 Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;

581 else

582 Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;

583 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

585 } else {

586

587 Align Alignment = DL.getPrefTypeAlign(GV->getType());

588

589 if (Subtarget->isTargetELF() && IsPositionIndependent)

590 return ARMLowerPICELF(GV, VT);

591

592

593 unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;

597 PCAdj);

598 unsigned Idx = MCP.getConstantPoolIndex(CPV, Alignment);

599

600

601 MachineInstrBuilder MIB;

602 if (isThumb2) {

603 unsigned Opc = IsPositionIndependent ? ARM::t2LDRpci_pic : ARM::t2LDRpci;

604 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc),

606 if (IsPositionIndependent)

608 AddOptionalDefs(MIB);

609 } else {

610

612 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

613 TII.get(ARM::LDRcp), DestReg)

616 AddOptionalDefs(MIB);

617

618 if (IsPositionIndependent) {

619 unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;

621

622 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,

626 AddOptionalDefs(MIB);

627 return NewDestReg;

628 }

629 }

630 }

631

632 if ((Subtarget->isTargetELF() && Subtarget->isGVInGOT(GV)) ||

633 (Subtarget->isTargetMachO() && IsIndirect)) {

634 MachineInstrBuilder MIB;

636 if (isThumb2)

637 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

638 TII.get(ARM::t2LDRi12), NewDestReg)

641 else

642 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

643 TII.get(ARM::LDRi12), NewDestReg)

646 DestReg = NewDestReg;

647 AddOptionalDefs(MIB);

648 }

649

650 return DestReg;

651}

652

653Register ARMFastISel::fastMaterializeConstant(const Constant *C) {

655

656

660

662 return ARMMaterializeFP(CFP, VT);

664 return ARMMaterializeGV(GV, VT);

666 return ARMMaterializeInt(C, VT);

667

669}

670

671

672

673Register ARMFastISel::fastMaterializeAlloca(const AllocaInst *AI) {

674

675 if (!FuncInfo.StaticAllocaMap.count(AI))

677

678 MVT VT;

679 if (!isLoadTypeLegal(AI->getType(), VT))

681

682 DenseMap<const AllocaInst*, int>::iterator SI =

683 FuncInfo.StaticAllocaMap.find(AI);

684

685

686

687 if (SI != FuncInfo.StaticAllocaMap.end()) {

688 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;

689 const TargetRegisterClass* RC = TLI.getRegClassFor(VT);

690 Register ResultReg = createResultReg(RC);

692

693 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

697 return ResultReg;

698 }

699

701}

702

703bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {

705

706

707 if (evt == MVT::Other || !evt.isSimple()) return false;

709

710

711

713}

714

715bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {

716 if (isTypeLegal(Ty, VT)) return true;

717

718

719

720 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)

721 return true;

722

723 return false;

724}

725

726

727bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {

728

729 const User *U = nullptr;

730 unsigned Opcode = Instruction::UserOp1;

732

733

734 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||

735 FuncInfo.getMBB(I->getParent()) == FuncInfo.MBB) {

736 Opcode = I->getOpcode();

737 U = I;

738 }

740 Opcode = C->getOpcode();

741 U = C;

742 }

743

745 if (Ty->getAddressSpace() > 255)

746

747

748 return false;

749

750 switch (Opcode) {

751 default:

752 break;

753 case Instruction::BitCast:

754

755 return ARMComputeAddress(U->getOperand(0), Addr);

756 case Instruction::IntToPtr:

757

760 return ARMComputeAddress(U->getOperand(0), Addr);

761 break;

762 case Instruction::PtrToInt:

763

765 return ARMComputeAddress(U->getOperand(0), Addr);

766 break;

767 case Instruction::GetElementPtr: {

768 Address SavedAddr = Addr;

769 int TmpOffset = Addr.getOffset();

770

771

772

775 i != e; ++i, ++GTI) {

778 const StructLayout *SL = DL.getStructLayout(STy);

781 } else {

783 while (true) {

785

787 break;

788 }

789 if (canFoldAddIntoGEP(U, Op)) {

790

791 ConstantInt *CI =

794

796 continue;

797 }

798

799 goto unsupported_gep;

800 }

801 }

802 }

803

804

805 Addr.setOffset(TmpOffset);

806 if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

807

808

809 Addr = SavedAddr;

810

811 unsupported_gep:

812 break;

813 }

814 case Instruction::Alloca: {

816 DenseMap<const AllocaInst*, int>::iterator SI =

817 FuncInfo.StaticAllocaMap.find(AI);

818 if (SI != FuncInfo.StaticAllocaMap.end()) {

819 Addr.setKind(Address::FrameIndexBase);

820 Addr.setFI(SI->second);

821 return true;

822 }

823 break;

824 }

825 }

826

827

828 if (!Addr.getReg())

829 Addr.setReg(getRegForValue(Obj));

830 return Addr.getReg();

831}

832

833void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {

834 bool needsLowering = false;

837 case MVT::i1:

838 case MVT::i8:

839 case MVT::i16:

840 case MVT::i32:

841 if (!useAM3) {

842

843 needsLowering = ((Addr.getOffset() & 0xfff) != Addr.getOffset());

844

845 if (needsLowering && isThumb2)

846 needsLowering = !(Subtarget->hasV6T2Ops() && Addr.getOffset() < 0 &&

847 Addr.getOffset() > -256);

848 } else {

849

850 needsLowering = (Addr.getOffset() > 255 || Addr.getOffset() < -255);

851 }

852 break;

853 case MVT::f32:

854 case MVT::f64:

855

856 needsLowering = ((Addr.getOffset() & 0xff) != Addr.getOffset());

857 break;

858 }

859

860

861

862

863 if (needsLowering && Addr.isFIBase()) {

864 const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass

865 : &ARM::GPRRegClass;

866 Register ResultReg = createResultReg(RC);

867 unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;

868 AddOptionalDefs(

869 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)

872 Addr.setKind(Address::RegBase);

873 Addr.setReg(ResultReg);

874 }

875

876

877

878 if (needsLowering) {

879 Addr.setReg(fastEmit_ri_(MVT::i32, ISD::ADD, Addr.getReg(),

880 Addr.getOffset(), MVT::i32));

881 Addr.setOffset(0);

882 }

883}

884

885void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr,

886 const MachineInstrBuilder &MIB,

888 bool useAM3) {

889

890

892 Addr.setOffset(Addr.getOffset() / 4);

893

894

895 if (Addr.isFIBase()) {

896 int FI = Addr.getFI();

897 int Offset = Addr.getOffset();

898 MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(

900 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

901

903

904

905

906 if (useAM3) {

907 int Imm = (Addr.getOffset() < 0) ? (0x100 | -Addr.getOffset())

908 : Addr.getOffset();

911 } else {

912 MIB.addImm(Addr.getOffset());

913 }

915 } else {

916

917 MIB.addReg(Addr.getReg());

918

919

920

921 if (useAM3) {

922 int Imm = (Addr.getOffset() < 0) ? (0x100 | -Addr.getOffset())

923 : Addr.getOffset();

926 } else {

927 MIB.addImm(Addr.getOffset());

928 }

929 }

930 AddOptionalDefs(MIB);

931}

932

933bool ARMFastISel::ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr,

934 MaybeAlign Alignment, bool isZExt,

935 bool allocReg) {

936 unsigned Opc;

937 bool useAM3 = false;

938 bool needVMOV = false;

939 const TargetRegisterClass *RC;

941

942 default: return false;

943 case MVT::i1:

944 case MVT::i8:

945 if (isThumb2) {

946 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&

947 Subtarget->hasV6T2Ops())

948 Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;

949 else

950 Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;

951 } else {

952 if (isZExt) {

953 Opc = ARM::LDRBi12;

954 } else {

955 Opc = ARM::LDRSB;

956 useAM3 = true;

957 }

958 }

959 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;

960 break;

961 case MVT::i16:

962 if (Alignment && *Alignment < Align(2) &&

963 !Subtarget->allowsUnalignedMem())

964 return false;

965

966 if (isThumb2) {

967 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&

968 Subtarget->hasV6T2Ops())

969 Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;

970 else

971 Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;

972 } else {

973 Opc = isZExt ? ARM::LDRH : ARM::LDRSH;

974 useAM3 = true;

975 }

976 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;

977 break;

978 case MVT::i32:

979 if (Alignment && *Alignment < Align(4) &&

980 !Subtarget->allowsUnalignedMem())

981 return false;

982

983 if (isThumb2) {

984 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&

985 Subtarget->hasV6T2Ops())

986 Opc = ARM::t2LDRi8;

987 else

988 Opc = ARM::t2LDRi12;

989 } else {

990 Opc = ARM::LDRi12;

991 }

992 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;

993 break;

994 case MVT::f32:

995 if (!Subtarget->hasVFP2Base()) return false;

996

997 if (Alignment && *Alignment < Align(4)) {

998 needVMOV = true;

999 VT = MVT::i32;

1000 Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;

1001 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;

1002 } else {

1003 Opc = ARM::VLDRS;

1005 }

1006 break;

1007 case MVT::f64:

1008

1009 if (!Subtarget->hasVFP2Base()) return false;

1010

1011

1012 if (Alignment && *Alignment < Align(4))

1013 return false;

1014

1015 Opc = ARM::VLDRD;

1017 break;

1018 }

1019

1020 ARMSimplifyAddress(Addr, VT, useAM3);

1021

1022

1023 if (allocReg)

1024 ResultReg = createResultReg(RC);

1025 assert(ResultReg.isVirtual() && "Expected an allocated virtual register.");

1026 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

1029

1030

1031

1032 if (needVMOV) {

1034 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

1035 TII.get(ARM::VMOVSR), MoveReg)

1036 .addReg(ResultReg));

1038 }

1039 return true;

1040}

1041

1042bool ARMFastISel::SelectLoad(const Instruction *I) {

1043

1045 return false;

1046

1047 const Value *SV = I->getOperand(0);

1049

1050

1052 if (Arg->hasSwiftErrorAttr())

1053 return false;

1054 }

1055

1057 if (Alloca->isSwiftError())

1058 return false;

1059 }

1060 }

1061

1062

1063 MVT VT;

1064 if (!isLoadTypeLegal(I->getType(), VT))

1065 return false;

1066

1067

1069 if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;

1070

1073 return false;

1074 updateValueMap(I, ResultReg);

1075 return true;

1076}

1077

1078bool ARMFastISel::ARMEmitStore(MVT VT, Register SrcReg, Address &Addr,

1079 MaybeAlign Alignment) {

1080 unsigned StrOpc;

1081 bool useAM3 = false;

1083

1084 default: return false;

1085 case MVT::i1: {

1086 Register Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass

1087 : &ARM::GPRRegClass);

1088 unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;

1090 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

1093 SrcReg = Res;

1094 [[fallthrough]];

1095 }

1096 case MVT::i8:

1097 if (isThumb2) {

1098 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&

1099 Subtarget->hasV6T2Ops())

1100 StrOpc = ARM::t2STRBi8;

1101 else

1102 StrOpc = ARM::t2STRBi12;

1103 } else {

1104 StrOpc = ARM::STRBi12;

1105 }

1106 break;

1107 case MVT::i16:

1108 if (Alignment && *Alignment < Align(2) &&

1109 !Subtarget->allowsUnalignedMem())

1110 return false;

1111

1112 if (isThumb2) {

1113 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&

1114 Subtarget->hasV6T2Ops())

1115 StrOpc = ARM::t2STRHi8;

1116 else

1117 StrOpc = ARM::t2STRHi12;

1118 } else {

1119 StrOpc = ARM::STRH;

1120 useAM3 = true;

1121 }

1122 break;

1123 case MVT::i32:

1124 if (Alignment && *Alignment < Align(4) &&

1125 !Subtarget->allowsUnalignedMem())

1126 return false;

1127

1128 if (isThumb2) {

1129 if (Addr.getOffset() < 0 && Addr.getOffset() > -256 &&

1130 Subtarget->hasV6T2Ops())

1131 StrOpc = ARM::t2STRi8;

1132 else

1133 StrOpc = ARM::t2STRi12;

1134 } else {

1135 StrOpc = ARM::STRi12;

1136 }

1137 break;

1138 case MVT::f32:

1139 if (!Subtarget->hasVFP2Base()) return false;

1140

1141 if (Alignment && *Alignment < Align(4)) {

1143 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

1144 TII.get(ARM::VMOVRS), MoveReg)

1147 VT = MVT::i32;

1148 StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;

1149 } else {

1150 StrOpc = ARM::VSTRS;

1151 }

1152 break;

1153 case MVT::f64:

1154

1155 if (!Subtarget->hasVFP2Base()) return false;

1156

1157

1158 if (Alignment && *Alignment < Align(4))

1159 return false;

1160

1161 StrOpc = ARM::VSTRD;

1162 break;

1163 }

1164

1165 ARMSimplifyAddress(Addr, VT, useAM3);

1166

1167

1169 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

1173 return true;

1174}

1175

1176bool ARMFastISel::SelectStore(const Instruction *I) {

1177 Value *Op0 = I->getOperand(0);

1179

1180

1182 return false;

1183

1184 const Value *PtrV = I->getOperand(1);

1186

1187

1189 if (Arg->hasSwiftErrorAttr())

1190 return false;

1191 }

1192

1194 if (Alloca->isSwiftError())

1195 return false;

1196 }

1197 }

1198

1199

1200 MVT VT;

1201 if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))

1202 return false;

1203

1204

1205 SrcReg = getRegForValue(Op0);

1206 if (!SrcReg)

1207 return false;

1208

1209

1211 if (!ARMComputeAddress(I->getOperand(1), Addr))

1212 return false;

1213

1215 return false;

1216 return true;

1217}

1218

1220 switch (Pred) {

1221

1224 default:

1225

1263 }

1264}

1265

1266bool ARMFastISel::SelectBranch(const Instruction *I) {

1268 MachineBasicBlock *TBB = FuncInfo.getMBB(BI->getSuccessor(0));

1269 MachineBasicBlock *FBB = FuncInfo.getMBB(BI->getSuccessor(1));

1270

1271

1272

1273

1274

1276 if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {

1277

1278

1280 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {

1283 }

1284

1286

1287

1288 if (ARMPred == ARMCC::AL) return false;

1289

1290

1292 return false;

1293

1294 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;

1295 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(BrOpc))

1297 finishCondBranch(BI->getParent(), TBB, FBB);

1298 return true;

1299 }

1301 MVT SourceVT;

1302 if (TI->hasOneUse() && TI->getParent() == I->getParent() &&

1303 (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {

1304 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;

1305 Register OpReg = getRegForValue(TI->getOperand(0));

1307 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

1310

1312 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {

1315 }

1316

1317 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;

1318 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(BrOpc))

1320

1321 finishCondBranch(BI->getParent(), TBB, FBB);

1322 return true;

1323 }

1324 } else if (const ConstantInt *CI =

1327 MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;

1328 fastEmitBranch(Target, MIMD.getDL());

1329 return true;

1330 }

1331

1333 if (!CmpReg)

1334 return false;

1335

1336

1337

1338

1339

1340

1341

1342

1343 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;

1345 AddOptionalDefs(

1346 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TstOpc))

1349

1351 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {

1354 }

1355

1356 unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;

1357 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(BrOpc))

1359 finishCondBranch(BI->getParent(), TBB, FBB);

1360 return true;

1361}

1362

1363bool ARMFastISel::SelectIndirectBr(const Instruction *I) {

1364 Register AddrReg = getRegForValue(I->getOperand(0));

1365 if (!AddrReg)

1366 return false;

1367

1368 unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;

1369 assert(isThumb2 || Subtarget->hasV4TOps());

1370

1371 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

1373

1375 for (const BasicBlock *SuccBB : IB->successors())

1376 FuncInfo.MBB->addSuccessor(FuncInfo.getMBB(SuccBB));

1377

1378 return true;

1379}

1380

1381bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,

1382 bool isZExt) {

1385 if (!SrcEVT.isSimple()) return false;

1387

1388 if (Ty->isFloatTy() && !Subtarget->hasVFP2Base())

1389 return false;

1390

1391 if (Ty->isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()))

1392 return false;

1393

1394

1395

1396 int Imm = 0;

1397 bool UseImm = false;

1398 bool isNegativeImm = false;

1399

1400

1402 if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||

1403 SrcVT == MVT::i1) {

1404 const APInt &CIVal = ConstInt->getValue();

1406

1407

1408

1409 if (Imm < 0 && Imm != (int)0x80000000) {

1410 isNegativeImm = true;

1412 }

1415 }

1417 if (SrcVT == MVT::f32 || SrcVT == MVT::f64)

1418 if (ConstFP->isZero() && !ConstFP->isNegative())

1419 UseImm = true;

1420 }

1421

1422 unsigned CmpOpc;

1423 bool isICmp = true;

1424 bool needsExt = false;

1426 default: return false;

1427

1428 case MVT::f32:

1429 isICmp = false;

1430 CmpOpc = UseImm ? ARM::VCMPZS : ARM::VCMPS;

1431 break;

1432 case MVT::f64:

1433 isICmp = false;

1434 CmpOpc = UseImm ? ARM::VCMPZD : ARM::VCMPD;

1435 break;

1436 case MVT::i1:

1437 case MVT::i8:

1438 case MVT::i16:

1439 needsExt = true;

1440 [[fallthrough]];

1441 case MVT::i32:

1442 if (isThumb2) {

1443 if (!UseImm)

1444 CmpOpc = ARM::t2CMPrr;

1445 else

1446 CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;

1447 } else {

1448 if (!UseImm)

1449 CmpOpc = ARM::CMPrr;

1450 else

1451 CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;

1452 }

1453 break;

1454 }

1455

1456 Register SrcReg1 = getRegForValue(Src1Value);

1457 if (!SrcReg1)

1458 return false;

1459

1461 if (!UseImm) {

1462 SrcReg2 = getRegForValue(Src2Value);

1463 if (!SrcReg2)

1464 return false;

1465 }

1466

1467

1468 if (needsExt) {

1469 SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);

1470 if (!SrcReg1)

1471 return false;

1472 if (!UseImm) {

1473 SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);

1474 if (!SrcReg2)

1475 return false;

1476 }

1477 }

1478

1479 const MCInstrDesc &II = TII.get(CmpOpc);

1481 if (!UseImm) {

1483 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)

1485 } else {

1486 MachineInstrBuilder MIB;

1487 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)

1489

1490

1491 if (isICmp)

1493 AddOptionalDefs(MIB);

1494 }

1495

1496

1497

1499 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

1500 TII.get(ARM::FMSTAT)));

1501 return true;

1502}

1503

1504bool ARMFastISel::SelectCmp(const Instruction *I) {

1506

1507

1509

1510

1511 if (ARMPred == ARMCC::AL) return false;

1512

1513

1515 return false;

1516

1517

1518

1519 unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;

1520 const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass

1521 : &ARM::GPRRegClass;

1522 Register DestReg = createResultReg(RC);

1524 Register ZeroReg = fastMaterializeConstant(Zero);

1525

1526 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovCCOpc), DestReg)

1529

1530 updateValueMap(I, DestReg);

1531 return true;

1532}

1533

1534bool ARMFastISel::SelectFPExt(const Instruction *I) {

1535

1536 if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()) return false;

1537

1538 Value *V = I->getOperand(0);

1539 if (I->getType()->isDoubleTy() ||

1540 V->getType()->isFloatTy()) return false;

1541

1543 if (Op)

1544 return false;

1545

1546 Register Result = createResultReg(&ARM::DPRRegClass);

1547 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

1548 TII.get(ARM::VCVTDS), Result)

1550 updateValueMap(I, Result);

1551 return true;

1552}

1553

1554bool ARMFastISel::SelectFPTrunc(const Instruction *I) {

1555

1556 if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()) return false;

1557

1558 Value *V = I->getOperand(0);

1559 if (!(I->getType()->isFloatTy() &&

1560 V->getType()->isDoubleTy())) return false;

1561

1563 if (Op)

1564 return false;

1565

1566 Register Result = createResultReg(&ARM::SPRRegClass);

1567 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

1568 TII.get(ARM::VCVTSD), Result)

1570 updateValueMap(I, Result);

1571 return true;

1572}

1573

1574bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {

1575

1576 if (!Subtarget->hasVFP2Base()) return false;

1577

1578 MVT DstVT;

1579 Type *Ty = I->getType();

1580 if (!isTypeLegal(Ty, DstVT))

1581 return false;

1582

1583 Value *Src = I->getOperand(0);

1584 EVT SrcEVT = TLI.getValueType(DL, Src->getType(), true);

1586 return false;

1588 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)

1589 return false;

1590

1591 Register SrcReg = getRegForValue(Src);

1592 if (!SrcReg)

1593 return false;

1594

1595

1596 if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {

1597 SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32,

1599 if (!SrcReg)

1600 return false;

1601 }

1602

1603

1604

1605 Register FP = ARMMoveToFPReg(MVT::f32, SrcReg);

1606 if (FP)

1607 return false;

1608

1609 unsigned Opc;

1611 else if (Ty->isDoubleTy() && Subtarget->hasFP64())

1612 Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;

1613 else return false;

1614

1616 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

1618 updateValueMap(I, ResultReg);

1619 return true;

1620}

1621

1622bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {

1623

1624 if (!Subtarget->hasVFP2Base()) return false;

1625

1626 MVT DstVT;

1627 Type *RetTy = I->getType();

1628 if (!isTypeLegal(RetTy, DstVT))

1629 return false;

1630

1631 Register Op = getRegForValue(I->getOperand(0));

1632 if (Op)

1633 return false;

1634

1635 unsigned Opc;

1636 Type *OpTy = I->getOperand(0)->getType();

1638 else if (OpTy->isDoubleTy() && Subtarget->hasFP64())

1639 Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;

1640 else return false;

1641

1642

1644 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

1646

1647

1648

1649 Register IntReg = ARMMoveToIntReg(DstVT, ResultReg);

1650 if (!IntReg)

1651 return false;

1652

1653 updateValueMap(I, IntReg);

1654 return true;

1655}

1656

1657bool ARMFastISel::SelectSelect(const Instruction *I) {

1658 MVT VT;

1659 if (!isTypeLegal(I->getType(), VT))

1660 return false;

1661

1662

1663 if (VT != MVT::i32) return false;

1664

1665 Register CondReg = getRegForValue(I->getOperand(0));

1666 if (!CondReg)

1667 return false;

1668 Register Op1Reg = getRegForValue(I->getOperand(1));

1669 if (!Op1Reg)

1670 return false;

1671

1672

1673 int Imm = 0;

1674 bool UseImm = false;

1675 bool isNegativeImm = false;

1677 assert(VT == MVT::i32 && "Expecting an i32.");

1678 Imm = (int)ConstInt->getValue().getZExtValue();

1679 if (Imm < 0) {

1680 isNegativeImm = true;

1681 Imm = ~Imm;

1682 }

1685 }

1686

1688 if (!UseImm) {

1689 Op2Reg = getRegForValue(I->getOperand(2));

1690 if (!Op2Reg)

1691 return false;

1692 }

1693

1694 unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;

1696 AddOptionalDefs(

1697 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TstOpc))

1700

1701 unsigned MovCCOpc;

1702 const TargetRegisterClass *RC;

1703 if (!UseImm) {

1704 RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass;

1705 MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;

1706 } else {

1707 RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;

1708 if (!isNegativeImm)

1709 MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;

1710 else

1711 MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;

1712 }

1713 Register ResultReg = createResultReg(RC);

1714 if (!UseImm) {

1717 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovCCOpc),

1718 ResultReg)

1723 } else {

1725 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovCCOpc),

1726 ResultReg)

1731 }

1732 updateValueMap(I, ResultReg);

1733 return true;

1734}

1735

1736bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {

1737 MVT VT;

1738 Type *Ty = I->getType();

1739 if (!isTypeLegal(Ty, VT))

1740 return false;

1741

1742

1743

1744

1745 if (Subtarget->hasDivideInThumbMode())

1746 return false;

1747

1748

1749 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;

1750 if (VT == MVT::i8)

1751 LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;

1752 else if (VT == MVT::i16)

1753 LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;

1754 else if (VT == MVT::i32)

1755 LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;

1756 else if (VT == MVT::i64)

1757 LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;

1758 else if (VT == MVT::i128)

1759 LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;

1760 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");

1761

1762 return ARMEmitLibcall(I, LC);

1763}

1764

1765bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {

1766 MVT VT;

1767 Type *Ty = I->getType();

1768 if (!isTypeLegal(Ty, VT))

1769 return false;

1770

1771

1772

1773

1775 return false;

1776 }

1777

1778 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;

1779 if (VT == MVT::i8)

1780 LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;

1781 else if (VT == MVT::i16)

1782 LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;

1783 else if (VT == MVT::i32)

1784 LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;

1785 else if (VT == MVT::i64)

1786 LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;

1787 else if (VT == MVT::i128)

1788 LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;

1789 assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");

1790

1791 return ARMEmitLibcall(I, LC);

1792}

1793

1794bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {

1796

1797

1798

1799 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)

1800 return false;

1801

1802 unsigned Opc;

1803 switch (ISDOpcode) {

1804 default: return false;

1806 Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;

1807 break;

1809 Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;

1810 break;

1812 Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;

1813 break;

1814 }

1815

1816 Register SrcReg1 = getRegForValue(I->getOperand(0));

1817 if (!SrcReg1)

1818 return false;

1819

1820

1821

1822 Register SrcReg2 = getRegForValue(I->getOperand(1));

1823 if (!SrcReg2)

1824 return false;

1825

1826 Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);

1829 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

1832 updateValueMap(I, ResultReg);

1833 return true;

1834}

1835

1836bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {

1838 if (!FPVT.isSimple()) return false;

1840

1841

1843 return false;

1844

1845

1846

1847

1848

1849 Type *Ty = I->getType();

1850 if (Ty->isFloatTy() && !Subtarget->hasVFP2Base())

1851 return false;

1852 if (Ty->isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()))

1853 return false;

1854

1855 unsigned Opc;

1856 bool is64bit = VT == MVT::f64 || VT == MVT::i64;

1857 switch (ISDOpcode) {

1858 default: return false;

1860 Opc = is64bit ? ARM::VADDD : ARM::VADDS;

1861 break;

1863 Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;

1864 break;

1866 Opc = is64bit ? ARM::VMULD : ARM::VMULS;

1867 break;

1868 }

1869 Register Op1 = getRegForValue(I->getOperand(0));

1870 if (!Op1)

1871 return false;

1872

1873 Register Op2 = getRegForValue(I->getOperand(1));

1874 if (!Op2)

1875 return false;

1876

1878 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

1881 updateValueMap(I, ResultReg);

1882 return true;

1883}

1884

1885

1886

1887

1888

1889CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,

1890 bool Return,

1891 bool isVarArg) {

1892 switch (CC) {

1893 default:

1895 case CallingConv::Fast:

1896 if (Subtarget->hasVFP2Base() && !isVarArg) {

1899

1901 }

1902 [[fallthrough]];

1903 case CallingConv::C:

1904 case CallingConv::CXX_FAST_TLS:

1905

1907 if (Subtarget->hasFPRegs() &&

1910 else

1912 } else {

1914 }

1915 case CallingConv::ARM_AAPCS_VFP:

1916 case CallingConv::Swift:

1917 case CallingConv::SwiftTail:

1918 if (!isVarArg)

1920

1921

1922 [[fallthrough]];

1923 case CallingConv::ARM_AAPCS:

1925 case CallingConv::ARM_APCS:

1927 case CallingConv::GHC:

1928 if (Return)

1930 else

1932 case CallingConv::CFGuard_Check:

1934 }

1935}

1936

1937bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,

1938 SmallVectorImpl &ArgRegs,

1939 SmallVectorImpl &ArgVTs,

1940 SmallVectorImplISD::ArgFlagsTy &ArgFlags,

1941 SmallVectorImpl &RegArgs,

1942 CallingConv::ID CC,

1943 unsigned &NumBytes,

1944 bool isVarArg) {

1947 for (Value *Arg : Args)

1948 OrigTys.push_back(Arg->getType());

1949 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, ArgLocs, *Context);

1950 CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, OrigTys,

1951 CCAssignFnForCall(CC, false, isVarArg));

1952

1953

1954

1955 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

1956 CCValAssign &VA = ArgLocs[i];

1957 MVT ArgVT = ArgVTs[VA.getValNo()];

1958

1959

1961 return false;

1962

1963

1965 continue;

1967

1968 if (VA.getLocVT() != MVT::f64 ||

1969

1970 !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())

1971 return false;

1972 } else {

1974 default:

1975 return false;

1976 case MVT::i1:

1977 case MVT::i8:

1978 case MVT::i16:

1979 case MVT::i32:

1980 break;

1981 case MVT::f32:

1982 if (!Subtarget->hasVFP2Base())

1983 return false;

1984 break;

1985 case MVT::f64:

1986 if (!Subtarget->hasVFP2Base())

1987 return false;

1988 break;

1989 }

1990 }

1991 }

1992

1993

1994

1995

1996 NumBytes = CCInfo.getStackSize();

1997

1998

2000 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

2001 TII.get(AdjStackDown))

2003

2004

2005 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

2006 CCValAssign &VA = ArgLocs[i];

2009 MVT ArgVT = ArgVTs[VA.getValNo()];

2010

2012 "We don't handle NEON/vector parameters yet.");

2013

2014

2019 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, false);

2020 assert(Arg && "Failed to emit a sext");

2021 ArgVT = DestVT;

2022 break;

2023 }

2025

2028 Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, true);

2029 assert(Arg && "Failed to emit a zext");

2030 ArgVT = DestVT;

2031 break;

2032 }

2034 Register BC = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg);

2035 assert(BC && "Failed to emit a bitcast!");

2036 Arg = BC;

2038 break;

2039 }

2041 }

2042

2043

2045 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

2049

2051 "Custom lowering for v2f64 args not available");

2052

2053

2054 CCValAssign &NextVA = ArgLocs[++i];

2055

2057 "We only handle register args!");

2058

2059 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

2065 } else {

2067

2068

2069

2071 continue;

2072

2074 Addr.setKind(Address::RegBase);

2075 Addr.setReg(ARM::SP);

2077

2078 bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet;

2079 assert(EmitRet && "Could not emit a store for argument!");

2080 }

2081 }

2082

2083 return true;

2084}

2085

2086bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl &UsedRegs,

2087 const Instruction *I, CallingConv::ID CC,

2088 unsigned &NumBytes, bool isVarArg) {

2089

2091 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

2092 TII.get(AdjStackUp))

2094

2095

2096 if (RetVT != MVT::isVoid) {

2098 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);

2099 CCInfo.AnalyzeCallResult(RetVT, I->getType(),

2100 CCAssignFnForCall(CC, true, isVarArg));

2101

2102

2103 if (RVLocs.size() == 2 && RetVT == MVT::f64) {

2104

2105

2106 MVT DestVT = RVLocs[0].getValVT();

2107 const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);

2108 Register ResultReg = createResultReg(DstRC);

2109 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

2110 TII.get(ARM::VMOVDRR), ResultReg)

2111 .addReg(RVLocs[0].getLocReg())

2112 .addReg(RVLocs[1].getLocReg()));

2113

2114 UsedRegs.push_back(RVLocs[0].getLocReg());

2115 UsedRegs.push_back(RVLocs[1].getLocReg());

2116

2117

2118 updateValueMap(I, ResultReg);

2119 } else {

2120 assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!");

2121 MVT CopyVT = RVLocs[0].getValVT();

2122

2123

2124 if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)

2125 CopyVT = MVT::i32;

2126

2127 const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);

2128

2129 Register ResultReg = createResultReg(DstRC);

2130 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

2131 TII.get(TargetOpcode::COPY),

2132 ResultReg).addReg(RVLocs[0].getLocReg());

2133 UsedRegs.push_back(RVLocs[0].getLocReg());

2134

2135

2136 updateValueMap(I, ResultReg);

2137 }

2138 }

2139

2140 return true;

2141}

2142

2143bool ARMFastISel::SelectRet(const Instruction *I) {

2145 const Function &F = *I->getParent()->getParent();

2146 const bool IsCmseNSEntry = F.hasFnAttribute("cmse_nonsecure_entry");

2147

2148 if (!FuncInfo.CanLowerReturn)

2149 return false;

2150

2152 F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))

2153 return false;

2154

2156 return false;

2157

2158

2160

2161 CallingConv::ID CC = F.getCallingConv();

2164 GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);

2165

2166

2168 CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());

2169 CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true ,

2170 F.isVarArg()));

2171

2174 if (Reg)

2175 return false;

2176

2177

2178 if (ValLocs.size() != 1)

2179 return false;

2180

2181 CCValAssign &VA = ValLocs[0];

2182

2183

2185 return false;

2186

2188 return false;

2189

2192 if (!RVEVT.isSimple()) return false;

2195

2196 if (RVVT != DestVT) {

2197 if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)

2198 return false;

2199

2200 assert(DestVT == MVT::i32 && "ARM should always ext to i32");

2201

2202

2203

2204 if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {

2205 SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());

2206 if (!SrcReg)

2207 return false;

2208 }

2209 }

2210

2211

2213 const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);

2214

2215 if (!SrcRC->contains(DstReg))

2216 return false;

2217 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

2218 TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);

2219

2220

2222 }

2223

2224 unsigned RetOpc;

2225 if (IsCmseNSEntry)

2226 if (isThumb2)

2227 RetOpc = ARM::tBXNS_RET;

2228 else

2230 else

2231 RetOpc = Subtarget->getReturnOpcode();

2232

2233 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

2235 AddOptionalDefs(MIB);

2238 return true;

2239}

2240

2241unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {

2244 else

2245 return isThumb2 ? ARM::tBL : ARM::BL;

2246}

2247

2248Register ARMFastISel::getLibcallReg(const Twine &Name) {

2249

2250 Type *GVTy = PointerType::get(*Context, 0);

2254

2255 GlobalValue *GV = M.getNamedGlobal(Name.str());

2256 if (!GV)

2257 GV = new GlobalVariable(M, Type::getInt32Ty(*Context), false,

2259

2260 return ARMMaterializeGV(GV, LCREVT.getSimpleVT());

2261}

2262

2263

2264

2265

2266

2267

2268

2269

2270bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {

2272

2273

2274 Type *RetTy = I->getType();

2275 MVT RetVT;

2277 RetVT = MVT::isVoid;

2278 else if (!isTypeLegal(RetTy, RetVT))

2279 return false;

2280

2281

2282 if (RetVT != MVT::isVoid && RetVT != MVT::i32) {

2284 CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);

2285 CCInfo.AnalyzeCallResult(RetVT, RetTy, CCAssignFnForCall(CC, true, false));

2286 if (RVLocs.size() >= 2 && RetVT != MVT::f64)

2287 return false;

2288 }

2289

2290

2291 SmallVector<Value*, 8> Args;

2295 Args.reserve(I->getNumOperands());

2296 ArgRegs.reserve(I->getNumOperands());

2297 ArgVTs.reserve(I->getNumOperands());

2298 ArgFlags.reserve(I->getNumOperands());

2299 for (Value *Op : I->operands()) {

2301 if (!Arg)

2302 return false;

2303

2304 Type *ArgTy = Op->getType();

2305 MVT ArgVT;

2306 if (!isTypeLegal(ArgTy, ArgVT)) return false;

2307

2308 ISD::ArgFlagsTy Flags;

2309 Flags.setOrigAlign(DL.getABITypeAlign(ArgTy));

2310

2315 }

2316

2317

2319 unsigned NumBytes;

2320 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,

2321 RegArgs, CC, NumBytes, false))

2322 return false;

2323

2325 if (Subtarget->genLongCalls()) {

2327 if (!CalleeReg)

2328 return false;

2329 }

2330

2331

2332 unsigned CallOpc = ARMSelectCallOp(Subtarget->genLongCalls());

2333 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,

2334 MIMD, TII.get(CallOpc));

2335

2336 if (isThumb2)

2338 if (Subtarget->genLongCalls()) {

2339 CalleeReg =

2341 MIB.addReg(CalleeReg);

2342 } else

2344

2345

2348

2349

2350

2351 MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));

2352

2353

2355 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false;

2356

2357

2358 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

2359

2360 return true;

2361}

2362

2363bool ARMFastISel::SelectCall(const Instruction *I,

2364 const char *IntrMemName = nullptr) {

2367

2368

2370

2371

2373

2374

2376

2377

2378

2380 bool isVarArg = FTy->isVarArg();

2381

2382

2383 Type *RetTy = I->getType();

2384 MVT RetVT;

2386 RetVT = MVT::isVoid;

2387 else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&

2388 RetVT != MVT::i8 && RetVT != MVT::i1)

2389 return false;

2390

2391

2392 if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&

2393 RetVT != MVT::i16 && RetVT != MVT::i32) {

2395 CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);

2396 CCInfo.AnalyzeCallResult(RetVT, RetTy,

2397 CCAssignFnForCall(CC, true, isVarArg));

2398 if (RVLocs.size() >= 2 && RetVT != MVT::f64)

2399 return false;

2400 }

2401

2402

2403 SmallVector<Value*, 8> Args;

2407 unsigned arg_size = CI->arg_size();

2408 Args.reserve(arg_size);

2409 ArgRegs.reserve(arg_size);

2410 ArgVTs.reserve(arg_size);

2411 ArgFlags.reserve(arg_size);

2412 for (auto ArgI = CI->arg_begin(), ArgE = CI->arg_end(); ArgI != ArgE; ++ArgI) {

2413

2414

2415 if (IntrMemName && ArgE - ArgI <= 1)

2416 break;

2417

2418 ISD::ArgFlagsTy Flags;

2419 unsigned ArgIdx = ArgI - CI->arg_begin();

2420 if (CI->paramHasAttr(ArgIdx, Attribute::SExt))

2421 Flags.setSExt();

2422 if (CI->paramHasAttr(ArgIdx, Attribute::ZExt))

2423 Flags.setZExt();

2424

2425

2426 if (CI->paramHasAttr(ArgIdx, Attribute::InReg) ||

2427 CI->paramHasAttr(ArgIdx, Attribute::StructRet) ||

2428 CI->paramHasAttr(ArgIdx, Attribute::SwiftSelf) ||

2429 CI->paramHasAttr(ArgIdx, Attribute::SwiftError) ||

2430 CI->paramHasAttr(ArgIdx, Attribute::Nest) ||

2432 return false;

2433

2434 Type *ArgTy = (*ArgI)->getType();

2435 MVT ArgVT;

2436 if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&

2437 ArgVT != MVT::i1)

2438 return false;

2439

2440 Register Arg = getRegForValue(*ArgI);

2442 return false;

2443

2444 Flags.setOrigAlign(DL.getABITypeAlign(ArgTy));

2445

2446 Args.push_back(*ArgI);

2450 }

2451

2452

2454 unsigned NumBytes;

2455 if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,

2456 RegArgs, CC, NumBytes, isVarArg))

2457 return false;

2458

2459 bool UseReg = false;

2461 if (!GV || Subtarget->genLongCalls()) UseReg = true;

2462

2465 if (IntrMemName)

2466 CalleeReg = getLibcallReg(IntrMemName);

2467 else

2468 CalleeReg = getRegForValue(Callee);

2469

2470 if (!CalleeReg)

2471 return false;

2472 }

2473

2474

2475 unsigned CallOpc = ARMSelectCallOp(UseReg);

2476 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,

2477 MIMD, TII.get(CallOpc));

2478

2479

2480 if(isThumb2)

2483 CalleeReg =

2485 MIB.addReg(CalleeReg);

2486 } else if (!IntrMemName)

2488 else

2490

2491

2494

2495

2496

2497 MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));

2498

2499

2501 if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg))

2502 return false;

2503

2504

2505 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);

2506

2508 return true;

2509}

2510

2511bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {

2512 return Len <= 16;

2513}

2514

2515bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,

2516 MaybeAlign Alignment) {

2517

2518 if (!ARMIsMemCpySmall(Len))

2519 return false;

2520

2521 while (Len) {

2522 MVT VT;

2523 if (!Alignment || *Alignment >= 4) {

2524 if (Len >= 4)

2525 VT = MVT::i32;

2526 else if (Len >= 2)

2527 VT = MVT::i16;

2528 else {

2529 assert(Len == 1 && "Expected a length of 1!");

2530 VT = MVT::i8;

2531 }

2532 } else {

2533 assert(Alignment && "Alignment is set in this branch");

2534

2535 if (Len >= 2 && *Alignment == 2)

2536 VT = MVT::i16;

2537 else {

2538 VT = MVT::i8;

2539 }

2540 }

2541

2542 bool RV;

2544 RV = ARMEmitLoad(VT, ResultReg, Src);

2545 assert(RV && "Should be able to handle this load.");

2546 RV = ARMEmitStore(VT, ResultReg, Dest);

2547 assert(RV && "Should be able to handle this store.");

2548 (void)RV;

2549

2552 Dest.setOffset(Dest.getOffset() + Size);

2553 Src.setOffset(Src.getOffset() + Size);

2554 }

2555

2556 return true;

2557}

2558

2559bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {

2560

2561 switch (I.getIntrinsicID()) {

2562 default: return false;

2563 case Intrinsic::frameaddress: {

2564 MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();

2566

2567 unsigned LdrOpc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;

2568 const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass

2569 : &ARM::GPRRegClass;

2570

2571 const ARMBaseRegisterInfo *RegInfo = Subtarget->getRegisterInfo();

2574

2575

2576

2577

2578

2579

2582 while (Depth--) {

2583 DestReg = createResultReg(RC);

2584 AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

2585 TII.get(LdrOpc), DestReg)

2587 SrcReg = DestReg;

2588 }

2589 updateValueMap(&I, SrcReg);

2590 return true;

2591 }

2592 case Intrinsic::memcpy:

2593 case Intrinsic::memmove: {

2595

2597 return false;

2598

2599

2600

2601 bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);

2603

2604

2606 if (ARMIsMemCpySmall(Len)) {

2608 if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||

2609 !ARMComputeAddress(MTI.getRawSource(), Src))

2610 return false;

2611 MaybeAlign Alignment;

2615 if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment))

2616 return true;

2617 }

2618 }

2619

2621 return false;

2622

2624 return false;

2625

2626 const char *IntrMemName = isa(I) ? "memcpy" : "memmove";

2627 return SelectCall(&I, IntrMemName);

2628 }

2629 case Intrinsic::memset: {

2631

2633 return false;

2634

2636 return false;

2637

2639 return false;

2640

2641 return SelectCall(&I, "memset");

2642 }

2643 case Intrinsic:🪤 {

2644 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

2645 TII.get(Subtarget->isThumb() ? ARM::tTRAP : ARM::TRAP));

2646 return true;

2647 }

2648 }

2649}

2650

2651bool ARMFastISel::SelectTrunc(const Instruction *I) {

2652

2653

2654 Value *Op = I->getOperand(0);

2655

2656 EVT SrcVT, DestVT;

2659

2660 if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)

2661 return false;

2662 if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)

2663 return false;

2664

2665 Register SrcReg = getRegForValue(Op);

2666 if (!SrcReg) return false;

2667

2668

2669

2670 updateValueMap(I, SrcReg);

2671 return true;

2672}

2673

2674Register ARMFastISel::ARMEmitIntExt(MVT SrcVT, Register SrcReg, MVT DestVT,

2675 bool isZExt) {

2676 if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)

2678 if (SrcVT != MVT::i16 && SrcVT != MVT::i8 && SrcVT != MVT::i1)

2680

2681

2682

2683 static const uint8_t isSingleInstrTbl[3][2][2][2] = {

2684

2685

2686

2687 { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } },

2688 { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } },

2689 { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }

2690 };

2691

2692

2693

2694

2695

2696 static const TargetRegisterClass *RCTbl[2][2] = {

2697

2698 { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass },

2699 { &ARM::tGPRRegClass, &ARM::rGPRRegClass }

2700 };

2701

2702

2703 static const struct InstructionTable {

2704 uint32_t Opc : 16;

2705 uint32_t hasS : 1;

2706 uint32_t Shift : 7;

2707 uint32_t Imm : 8;

2708 } IT[2][2][3][2] = {

2709 {

2710 {

2711 { { ARM::MOVsi , 1, ARM_AM::asr , 31 },

2712 { ARM::MOVsi , 1, ARM_AM::lsr , 31 } },

2713 { { ARM::MOVsi , 1, ARM_AM::asr , 24 },

2714 { ARM::MOVsi , 1, ARM_AM::lsr , 24 } },

2715 { { ARM::MOVsi , 1, ARM_AM::asr , 16 },

2716 { ARM::MOVsi , 1, ARM_AM::lsr , 16 } }

2717 },

2718 {

2719 { { ARM::tASRri , 0, ARM_AM::no_shift, 31 },

2720 { ARM::tLSRri , 0, ARM_AM::no_shift, 31 } },

2721 { { ARM::tASRri , 0, ARM_AM::no_shift, 24 },

2722 { ARM::tLSRri , 0, ARM_AM::no_shift, 24 } },

2723 { { ARM::tASRri , 0, ARM_AM::no_shift, 16 },

2724 { ARM::tLSRri , 0, ARM_AM::no_shift, 16 } }

2725 }

2726 },

2727 {

2728 {

2732 { ARM::ANDri , 1, ARM_AM::no_shift, 255 } },

2735 },

2736 {

2738 { ARM::t2ANDri, 1, ARM_AM::no_shift, 1 } },

2739 { { ARM::t2SXTB , 0, ARM_AM::no_shift, 0 },

2740 { ARM::t2ANDri, 1, ARM_AM::no_shift, 255 } },

2741 { { ARM::t2SXTH , 0, ARM_AM::no_shift, 0 },

2742 { ARM::t2UXTH , 0, ARM_AM::no_shift, 0 } }

2743 }

2744 }

2745 };

2746

2749 (void) DestBits;

2750 assert((SrcBits < DestBits) && "can only extend to larger types");

2751 assert((DestBits == 32 || DestBits == 16 || DestBits == 8) &&

2752 "other sizes unimplemented");

2753 assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) &&

2754 "other sizes unimplemented");

2755

2756 bool hasV6Ops = Subtarget->hasV6Ops();

2757 unsigned Bitness = SrcBits / 8;

2758 assert((Bitness < 3) && "sanity-check table bounds");

2759

2760 bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt];

2761 const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr];

2762 const InstructionTable *ITP = &IT[isSingleInstr][isThumb2][Bitness][isZExt];

2763 unsigned Opc = ITP->Opc;

2764 assert(ARM::KILL != Opc && "Invalid table entry");

2765 unsigned hasS = ITP->hasS;

2768 "only MOVsi has shift operand addressing mode");

2769 unsigned Imm = ITP->Imm;

2770

2771

2772 bool setsCPSR = &ARM::tGPRRegClass == RC;

2773 unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi;

2775

2776

2777

2779

2780

2781

2782

2783

2784

2785

2786

2787

2788 unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2;

2789 for (unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) {

2790 ResultReg = createResultReg(RC);

2791 bool isLsl = (0 == Instr) && !isSingleInstr;

2792 unsigned Opcode = isLsl ? LSLOpc : Opc;

2795 bool isKill = 1 == Instr;

2796 MachineInstrBuilder MIB = BuildMI(

2797 *FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opcode), ResultReg);

2798 if (setsCPSR)

2804 if (hasS)

2806

2807 SrcReg = ResultReg;

2808 }

2809

2810 return ResultReg;

2811}

2812

2813bool ARMFastISel::SelectIntExt(const Instruction *I) {

2814

2815

2816 Type *DestTy = I->getType();

2817 Value *Src = I->getOperand(0);

2818 Type *SrcTy = Src->getType();

2819

2821 Register SrcReg = getRegForValue(Src);

2822 if (!SrcReg) return false;

2823

2824 EVT SrcEVT, DestEVT;

2827 if (!SrcEVT.isSimple()) return false;

2828 if (!DestEVT.isSimple()) return false;

2829

2832 Register ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);

2833 if (!ResultReg)

2834 return false;

2835 updateValueMap(I, ResultReg);

2836 return true;

2837}

2838

2839bool ARMFastISel::SelectShift(const Instruction *I,

2841

2842

2843 if (isThumb2)

2844 return false;

2845

2846

2848 if (DestVT != MVT::i32)

2849 return false;

2850

2851 unsigned Opc = ARM::MOVsr;

2852 unsigned ShiftImm;

2853 Value *Src2Value = I->getOperand(1);

2855 ShiftImm = CI->getZExtValue();

2856

2857

2858

2859 if (ShiftImm == 0 || ShiftImm >=32)

2860 return false;

2861

2862 Opc = ARM::MOVsi;

2863 }

2864

2865 Value *Src1Value = I->getOperand(0);

2866 Register Reg1 = getRegForValue(Src1Value);

2867 if (!Reg1)

2868 return false;

2869

2871 if (Opc == ARM::MOVsr) {

2872 Reg2 = getRegForValue(Src2Value);

2873 if (!Reg2)

2874 return false;

2875 }

2876

2877 Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);

2878 if (!ResultReg)

2879 return false;

2880

2881 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

2884

2885 if (Opc == ARM::MOVsi)

2887 else if (Opc == ARM::MOVsr) {

2890 }

2891

2892 AddOptionalDefs(MIB);

2893 updateValueMap(I, ResultReg);

2894 return true;

2895}

2896

2897

2898bool ARMFastISel::fastSelectInstruction(const Instruction *I) {

2899 switch (I->getOpcode()) {

2900 case Instruction::Load:

2901 return SelectLoad(I);

2902 case Instruction::Store:

2903 return SelectStore(I);

2904 case Instruction::Br:

2905 return SelectBranch(I);

2906 case Instruction::IndirectBr:

2907 return SelectIndirectBr(I);

2908 case Instruction::ICmp:

2909 case Instruction::FCmp:

2910 return SelectCmp(I);

2911 case Instruction::FPExt:

2912 return SelectFPExt(I);

2913 case Instruction::FPTrunc:

2914 return SelectFPTrunc(I);

2915 case Instruction::SIToFP:

2916 return SelectIToFP(I, true);

2917 case Instruction::UIToFP:

2918 return SelectIToFP(I, false);

2919 case Instruction::FPToSI:

2920 return SelectFPToI(I, true);

2921 case Instruction::FPToUI:

2922 return SelectFPToI(I, false);

2923 case Instruction::Add:

2924 return SelectBinaryIntOp(I, ISD::ADD);

2925 case Instruction::Or:

2926 return SelectBinaryIntOp(I, ISD::OR);

2927 case Instruction::Sub:

2928 return SelectBinaryIntOp(I, ISD::SUB);

2929 case Instruction::FAdd:

2930 return SelectBinaryFPOp(I, ISD::FADD);

2931 case Instruction::FSub:

2932 return SelectBinaryFPOp(I, ISD::FSUB);

2933 case Instruction::FMul:

2934 return SelectBinaryFPOp(I, ISD::FMUL);

2935 case Instruction::SDiv:

2936 return SelectDiv(I, true);

2937 case Instruction::UDiv:

2938 return SelectDiv(I, false);

2939 case Instruction::SRem:

2940 return SelectRem(I, true);

2941 case Instruction::URem:

2942 return SelectRem(I, false);

2943 case Instruction::Call:

2945 return SelectIntrinsicCall(*II);

2946 return SelectCall(I);

2947 case Instruction::Select:

2948 return SelectSelect(I);

2949 case Instruction::Ret:

2950 return SelectRet(I);

2951 case Instruction::Trunc:

2952 return SelectTrunc(I);

2953 case Instruction::ZExt:

2954 case Instruction::SExt:

2955 return SelectIntExt(I);

2956 case Instruction::Shl:

2958 case Instruction::LShr:

2960 case Instruction::AShr:

2962 default: break;

2963 }

2964 return false;

2965}

2966

2967

2968

2969

2970

2977 { { ARM::SXTH, ARM::t2SXTH }, 0, 0, MVT::i16 },

2978 { { ARM::UXTH, ARM::t2UXTH }, 0, 1, MVT::i16 },

2979 { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8 },

2980 { { ARM::SXTB, ARM::t2SXTB }, 0, 0, MVT::i8 },

2981 { { ARM::UXTB, ARM::t2UXTB }, 0, 1, MVT::i8 }

2983

2984

2985

2986

2987

2988bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,

2989 const LoadInst *LI) {

2990

2991 MVT VT;

2992 if (!isLoadTypeLegal(LI->getType(), VT))

2993 return false;

2994

2995

2996

2997

2998

2999 if (MI->getNumOperands() < 3 || MI->getOperand(2).isImm())

3000 return false;

3001 const uint64_t Imm = MI->getOperand(2).getImm();

3002

3003 bool Found = false;

3004 bool isZExt;

3006 if (FLE.Opc[isThumb2] == MI->getOpcode() &&

3007 (uint64_t)FLE.ExpectedImm == Imm &&

3009 Found = true;

3010 isZExt = FLE.isZExt;

3011 }

3012 }

3013 if (!Found) return false;

3014

3015

3017 if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;

3018

3019 Register ResultReg = MI->getOperand(0).getReg();

3020 if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlign(), isZExt, false))

3021 return false;

3023 removeDeadCode(I, std::next(I));

3024 return true;

3025}

3026

3027Register ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, MVT VT) {

3028 bool UseGOT_PREL = !GV->isDSOLocal();

3029 LLVMContext *Context = &MF->getFunction().getContext();

3031 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;

3035 UseGOT_PREL);

3036

3037 Align ConstAlign =

3038 MF->getDataLayout().getPrefTypeAlign(PointerType::get(*Context, 0));

3039 unsigned Idx = MF->getConstantPool()->getConstantPoolIndex(CPV, ConstAlign);

3040 MachineMemOperand *CPMMO =

3043

3044 Register TempReg = MF->getRegInfo().createVirtualRegister(&ARM::rGPRRegClass);

3045 unsigned Opc = isThumb2 ? ARM::t2LDRpci : ARM::LDRcp;

3046 MachineInstrBuilder MIB =

3047 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), TempReg)

3050 if (Opc == ARM::LDRcp)

3053

3054

3056 Opc = Subtarget->isThumb() ? ARM::tPICADD : UseGOT_PREL ? ARM::PICLDR

3057 : ARM::PICADD;

3059 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg)

3061 .addImm(ARMPCLabelIndex);

3062

3063 if (!Subtarget->isThumb())

3065

3066 if (UseGOT_PREL && Subtarget->isThumb()) {

3068 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

3069 TII.get(ARM::t2LDRi12), NewDestReg)

3072 DestReg = NewDestReg;

3073 AddOptionalDefs(MIB);

3074 }

3075 return DestReg;

3076}

3077

3078bool ARMFastISel::fastLowerArguments() {

3079 if (!FuncInfo.CanLowerReturn)

3080 return false;

3081

3082 const Function *F = FuncInfo.Fn;

3083 if (F->isVarArg())

3084 return false;

3085

3086 CallingConv::ID CC = F->getCallingConv();

3087 switch (CC) {

3088 default:

3089 return false;

3090 case CallingConv::Fast:

3091 case CallingConv::C:

3092 case CallingConv::ARM_AAPCS_VFP:

3093 case CallingConv::ARM_AAPCS:

3094 case CallingConv::ARM_APCS:

3095 case CallingConv::Swift:

3096 case CallingConv::SwiftTail:

3097 break;

3098 }

3099

3100

3101

3102 for (const Argument &Arg : F->args()) {

3103 if (Arg.getArgNo() >= 4)

3104 return false;

3105

3106 if (Arg.hasAttribute(Attribute::InReg) ||

3107 Arg.hasAttribute(Attribute::StructRet) ||

3108 Arg.hasAttribute(Attribute::SwiftSelf) ||

3109 Arg.hasAttribute(Attribute::SwiftError) ||

3110 Arg.hasAttribute(Attribute::ByVal))

3111 return false;

3112

3113 Type *ArgTy = Arg.getType();

3115 return false;

3116

3118 if (!ArgVT.isSimple()) return false;

3120 case MVT::i8:

3121 case MVT::i16:

3122 case MVT::i32:

3123 break;

3124 default:

3125 return false;

3126 }

3127 }

3128

3130 ARM::R0, ARM::R1, ARM::R2, ARM::R3

3131 };

3132

3133 const TargetRegisterClass *RC = &ARM::rGPRRegClass;

3134 for (const Argument &Arg : F->args()) {

3135 unsigned ArgNo = Arg.getArgNo();

3136 MCRegister SrcReg = GPRArgRegs[ArgNo];

3137 Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);

3138

3139

3140

3141 Register ResultReg = createResultReg(RC);

3142 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,

3143 TII.get(TargetOpcode::COPY),

3145 updateValueMap(&Arg, ResultReg);

3146 }

3147

3148 return true;

3149}

3150

3151namespace llvm {

3152

3156 return new ARMFastISel(funcInfo, libInfo);

3157

3158 return nullptr;

3159 }

3160

3161}

unsigned const MachineRegisterInfo * MRI

static const MCPhysReg GPRArgRegs[]

assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")

const TargetInstrInfo & TII

This file declares a class to represent arbitrary precision floating point values and provide a variety of arithmetic operations on them.

This file implements a class to represent arbitrary precision integral constant values and operations on them.

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred)

Definition ARMFastISel.cpp:1219

static const struct FoldableLoadExtendsStruct FoldableLoadExtends[]

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::values(clEnumValN(DefaultIT, "arm-default-it", "Generate any type of IT block"), clEnumValN(RestrictedIT, "arm-restrict-it", "Disallow complex IT blocks")))

This file contains the simple types necessary to represent the attributes associated with functions a...

static const Function * getParent(const Value *V)

This file contains the declarations for the subclasses of Constant, which represent the different fla...

This file defines the DenseMap class.

static bool isSigned(unsigned int Opcode)

This file defines the FastISel class.

static Register UseReg(const MachineOperand &MO)

static MaybeAlign getAlign(Value *Ptr)

Module.h This file contains the declarations for the Module class.

Machine Check Debug Module

This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...

Register const TargetRegisterInfo * TRI

Promote Memory to Register

static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)

uint64_t IntrinsicInst * II

const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB

const GCNTargetMachine & getTM(const GCNSubtarget *STI)

This file defines the SmallVector class.

This file describes how to lower LLVM code to machine code.

static const unsigned FramePtr

uint64_t getZExtValue() const

Get zero extended value.

int64_t getSExtValue() const

Get sign extended value.

Register getFrameRegister(const MachineFunction &MF) const override

static ARMConstantPoolConstant * Create(const Constant *C, unsigned ID)

bool isThumb2Function() const

unsigned createPICLabelUId()

bool useFastISel() const

True if fast-isel is used.

bool supportSwiftError() const override

Return true if the target supports swifterror attribute.

bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize=false) const override

isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively.

bool supportSplitCSR(MachineFunction *MF) const override

Return true if the target supports that a subset of CSRs for the given machine function is handled ex...

const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const override

getRegClassFor - Return the register class that should be used for the specified value type.

bool hasStandaloneRem(EVT VT) const override

Return true if the target can handle a standalone remainder operation.

PointerType * getType() const

Overload to return most specific pointer type.

BasicBlock * getSuccessor(unsigned i) const

Value * getCondition() const

Register getLocReg() const

LocInfo getLocInfo() const

int64_t getLocMemOffset() const

unsigned getValNo() const

CallingConv::ID getCallingConv() const

LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const

Determine whether the argument or parameter has the given attribute.

User::op_iterator arg_begin()

Return the iterator pointing to the beginning of the argument list.

Value * getCalledOperand() const

User::op_iterator arg_end()

Return the iterator pointing to the end of the argument list.

FunctionType * getFunctionType() const

unsigned arg_size() const

Predicate

This enumeration lists the possible predicates for CmpInst subclasses.

@ FCMP_OEQ

0 0 0 1 True if ordered and equal

@ ICMP_SLT

signed less than

@ ICMP_SLE

signed less or equal

@ FCMP_OLT

0 1 0 0 True if ordered and less than

@ FCMP_ULE

1 1 0 1 True if unordered, less than, or equal

@ FCMP_OGT

0 0 1 0 True if ordered and greater than

@ FCMP_OGE

0 0 1 1 True if ordered and greater than or equal

@ ICMP_UGE

unsigned greater or equal

@ ICMP_UGT

unsigned greater than

@ ICMP_SGT

signed greater than

@ FCMP_ULT

1 1 0 0 True if unordered or less than

@ FCMP_ONE

0 1 1 0 True if ordered and operands are unequal

@ FCMP_UEQ

1 0 0 1 True if unordered or equal

@ ICMP_ULT

unsigned less than

@ FCMP_UGT

1 0 1 0 True if unordered or greater than

@ FCMP_OLE

0 1 0 1 True if ordered and less than or equal

@ FCMP_ORD

0 1 1 1 True if ordered (no nans)

@ ICMP_SGE

signed greater or equal

@ FCMP_UNE

1 1 1 0 True if unordered or not equal

@ ICMP_ULE

unsigned less or equal

@ FCMP_UGE

1 0 1 1 True if unordered, greater than, or equal

@ FCMP_UNO

1 0 0 0 True if unordered: isnan(X) | isnan(Y)

Predicate getInversePredicate() const

For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...

Predicate getPredicate() const

Return the predicate for this instruction.

const APFloat & getValueAPF() const

int64_t getSExtValue() const

Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...

uint64_t getZExtValue() const

Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...

This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...

FunctionLoweringInfo - This contains information that is global to a function that is used when lower...

LLVMContext & getContext() const

getContext - Return a reference to the LLVMContext associated with this function.

bool isThreadLocal() const

If the value is "Thread Local", its value isn't shared by the threads.

PointerType * getType() const

Global values are always pointers.

@ ExternalLinkage

Externally visible function.

Align getAlign() const

Return the alignment of the access that is being performed.

ArrayRef< MCOperandInfo > operands() const

const MCInstrDesc & get(unsigned Opcode) const

Return the machine instruction descriptor that corresponds to the specified instruction opcode.

bool isVector() const

Return true if this is a vector value type.

TypeSize getSizeInBits() const

Returns the size of the specified MVT in bits.

MachineInstrBundleIterator< MachineInstr > iterator

void setFrameAddressIsTaken(bool T)

const TargetSubtargetInfo & getSubtarget() const

getSubtarget - Return the subtarget for which this machine code is being compiled.

Ty * getInfo()

getInfo - Keep track of various per-function pieces of information for backends that would like to do...

const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const

const MachineInstrBuilder & addImm(int64_t Val) const

Add a new immediate operand.

const MachineInstrBuilder & add(const MachineOperand &MO) const

const MachineInstrBuilder & addFrameIndex(int Idx) const

const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const

const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const

const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const

const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const

Add a new virtual register operand.

const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const

const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const

Representation of each machine instruction.

Flags

Flags values. These may be or'd together.

@ MOLoad

The memory access reads data.

@ MOStore

The memory access writes data.

Value * getLength() const

Value * getRawDest() const

MaybeAlign getDestAlign() const

unsigned getDestAddressSpace() const

Value * getRawSource() const

Return the arguments to the instruction.

unsigned getSourceAddressSpace() const

MaybeAlign getSourceAlign() const

constexpr bool isValid() const

constexpr bool isVirtual() const

Return true if the specified register number is in the virtual register namespace.

constexpr unsigned id() const

void reserve(size_type N)

void push_back(const T &Elt)

TypeSize getElementOffset(unsigned Idx) const

unsigned getCallFrameSetupOpcode() const

These methods return the opcode of the frame setup/destroy instructions if they exist (-1 otherwise).

unsigned getCallFrameDestroyOpcode() const

Provides information about what library functions are available for the current target.

EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const

Return the EVT corresponding to this LLVM type.

CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const

Get the CallingConv that should be used for the specified libcall.

bool isTypeLegal(EVT VT) const

Return true if the target has native support for the specified value type.

virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const

Return the pointer type for the given address space, defaults to the pointer type from the data layou...

const char * getLibcallName(RTLIB::Libcall Call) const

Get the libcall routine name for the specified libcall.

bool isPositionIndependent() const

FloatABI::ABIType FloatABIType

FloatABIType - This setting is set by -float-abi=xxx option is specfied on the command line.

bool contains(Register Reg) const

Return true if the specified register is included in this register class.

bool isVectorTy() const

True if this is an instance of VectorType.

bool isArrayTy() const

True if this is an instance of ArrayType.

bool isFloatTy() const

Return true if this is 'float', a 32-bit IEEE fp type.

bool isStructTy() const

True if this is an instance of StructType.

bool isDoubleTy() const

Return true if this is 'double', a 64-bit IEEE fp type.

bool isIntegerTy() const

True if this is an instance of IntegerType.

bool isVoidTy() const

Return true if this is 'void'.

const Use * const_op_iterator

Value * getOperand(unsigned i) const

unsigned getNumOperands() const

Type * getType() const

All values are typed, get the type of this value.

bool hasOneUse() const

Return true if there is exactly one use of this value.

StructType * getStructTypeOrNull() const

TypeSize getSequentialElementStride(const DataLayout &DL) const

const ParentTy * getParent() const

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

constexpr char Align[]

Key for Kernel::Arg::Metadata::mAlign.

constexpr char Args[]

Key for Kernel::Metadata::mArgs.

@ GOT_PREL

Global Offset Table entry, PC-relative.

@ MO_NONLAZY

MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it represents a symbol which,...

int getSOImmVal(unsigned Arg)

getSOImmVal - Given a 32-bit immediate, if it is something that can fit into an shifter_operand immed...

int getFP32Imm(const APInt &Imm)

getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.

int getT2SOImmVal(unsigned Arg)

getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...

int getFP64Imm(const APInt &Imm)

getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.

unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm)

FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)

Definition ARMFastISel.cpp:3153

@ C

The default llvm calling convention, compatible with C.

@ ADD

Simple integer binary arithmetic operators.

@ FADD

Simple binary floating point operators.

Predicate

Predicate - These are "(BI << 5) | BO" for various predicates.

@ Implicit

Not emitted register (e.g. carry, or temporary result).

@ Define

Register definition.

@ Kill

The last use of a register.

@ User

could "use" a pointer

NodeAddr< InstrNode * > Instr

This is an optimization pass for GlobalISel generic memory operations.

bool RetFastCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)

FunctionAddr VTableAddr Value

LLVM_ABI Register constrainOperandRegClass(const MachineFunction &MF, const TargetRegisterInfo &TRI, MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, MachineOperand &RegMO)

Constrain the Register operand OpIdx, so that it is now constrained to the TargetRegisterClass passed...

LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)

Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...

MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)

Builder interface. Specify how to create the initial instruction itself.

decltype(auto) dyn_cast(const From &Val)

dyn_cast - Return the argument parameter cast to the specified type.

bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)

CCAssignFn - This function assigns a location for Val, updating State to reflect the change.

LLVM_ABI void diagnoseDontCall(const CallInst &CI)

bool CC_ARM_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)

bool RetCC_ARM_AAPCS_VFP(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)

bool RetCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)

bool RetCC_ARM_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)

bool CC_ARM_APCS_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)

static std::array< MachineOperand, 2 > predOps(ARMCC::CondCodes Pred, unsigned PredReg=0)

Get the operands corresponding to the given Pred value.

static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)

bool FastCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)

bool CC_ARM_Win32_CFGuard_Check(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)

LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)

generic_gep_type_iterator<> gep_type_iterator

constexpr bool isUInt(uint64_t x)

Checks if an unsigned integer fits into the given bit width.

class LLVM_GSL_OWNER SmallVector

Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...

bool isa(const From &Val)

isa - Return true if the parameter to the template is an instance of one of the template type argu...

unsigned getKillRegState(bool B)

uint16_t MCPhysReg

An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...

DWARFExpression::Operation Op

static MachineOperand t1CondCodeOp(bool isDead=false)

Get the operand corresponding to the conditional code result for Thumb1.

decltype(auto) cast(const From &Val)

cast - Return the argument parameter cast to the specified type.

gep_type_iterator gep_type_begin(const User *GEP)

static MachineOperand condCodeOp(unsigned CCReg=0)

Get the operand corresponding to the conditional code result.

unsigned gettBLXrOpcode(const MachineFunction &MF)

bool CC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)

unsigned getBLXOpcode(const MachineFunction &MF)

bool CC_ARM_AAPCS_VFP(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

Definition ARMFastISel.cpp:2971

uint8_t ExpectedVT

Definition ARMFastISel.cpp:2975

uint8_t ExpectedImm

Definition ARMFastISel.cpp:2973

uint8_t isZExt

Definition ARMFastISel.cpp:2974

uint16_t Opc[2]

Definition ARMFastISel.cpp:2972

bool isSimple() const

Test if the given EVT is simple (as opposed to being extended).

MVT getSimpleVT() const

Return the SimpleValueType held in the specified simple EVT.

static LLVM_ABI MachinePointerInfo getConstantPool(MachineFunction &MF)

Return a MachinePointerInfo record that refers to the constant pool.

static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)

Return a MachinePointerInfo record that refers to the specified FrameIndex.

Align valueOrOne() const

For convenience, returns a valid alignment or 1 if undefined.