LLVM: lib/Target/BPF/BPFISelLowering.cpp Source File (original) (raw)

1

2

3

4

5

6

7

8

9

10

11

12

13

33

34using namespace llvm;

35

36#define DEBUG_TYPE "bpf-lower"

37

40 cl::desc("Expand memcpy into load/store pairs in order"));

41

44 cl::desc("Set minimum number of entries to use a jump table on BPF"));

45

48 std::string Str;

49 if (Val) {

51 Val->print(OS);

52 OS << ' ';

53 }

57}

58

62

63

67

68

70

72

76

79

81

85

89

90

91

92

93 for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {

94 if (VT == MVT::i32) {

96 continue;

97 } else {

99 }

100

106 }

107

108 for (auto VT : {MVT::i32, MVT::i64}) {

111 }

112

113 for (auto VT : { MVT::i32, MVT::i64 }) {

114 if (VT == MVT::i32 && !STI.getHasAlu32())

115 continue;

116

122 }

137

141 }

142

147 }

148

154 }

155

156

161

166 }

167 }

168

172

173

176

178

179

180

181

182

183

184

185

186

187

188

193 } else {

194

195 unsigned CommonMaxStores =

197

202 }

203

204

209

211}

212

215 unsigned *Fast) const {

216

217 if (!AllowsMisalignedMemAccess)

218 return false;

219

220

222 return false;

223

224

226 *Fast = true;

227

228 return true;

229}

230

234

235bool BPFTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {

237 return false;

240 return NumBits1 > NumBits2;

241}

242

243bool BPFTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {

245 return false;

248 return NumBits1 > NumBits2;

249}

250

251bool BPFTargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {

253 return false;

256 return NumBits1 == 32 && NumBits2 == 64;

257}

258

259bool BPFTargetLowering::isZExtFree(EVT VT1, EVT VT2) const {

261 return false;

264 return NumBits1 == 32 && NumBits2 == 64;

265}

266

267bool BPFTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {

272 if ((MT1 == MVT::i8 || MT1 == MVT::i16 || MT1 == MVT::i32) &&

273 (MT2 == MVT::i32 || MT2 == MVT::i64))

274 return true;

275 }

277}

278

282

285 if (Constraint.size() == 1) {

286 switch (Constraint[0]) {

287 default:

288 break;

289 case 'w':

291 }

292 }

293

295}

296

297std::pair<unsigned, const TargetRegisterClass *>

300 MVT VT) const {

301 if (Constraint.size() == 1) {

302

303 switch (Constraint[0]) {

304 case 'r':

305 return std::make_pair(0U, &BPF::GPRRegClass);

306 case 'w':

307 if (HasAlu32)

308 return std::make_pair(0U, &BPF::GPR32RegClass);

309 break;

310 default:

311 break;

312 }

313 }

314

316}

317

318void BPFTargetLowering::ReplaceNodeResults(

320 const char *Msg;

321 uint32_t Opcode = N->getOpcode();

322 switch (Opcode) {

323 default:

325 case ISD::ATOMIC_LOAD_ADD:

326 case ISD::ATOMIC_LOAD_AND:

327 case ISD::ATOMIC_LOAD_OR:

328 case ISD::ATOMIC_LOAD_XOR:

329 case ISD::ATOMIC_SWAP:

330 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:

331 if (HasAlu32 || Opcode == ISD::ATOMIC_LOAD_ADD)

332 Msg = "unsupported atomic operation, please use 32/64 bit version";

333 else

334 Msg = "unsupported atomic operation, please use 64 bit version";

335 break;

336 case ISD::ATOMIC_LOAD:

337 case ISD::ATOMIC_STORE:

338 return;

339 }

340

342

343

345}

346

348 switch (Op.getOpcode()) {

349 default:

351 case ISD::BR_CC:

352 return LowerBR_CC(Op, DAG);

354 return LowerJumpTable(Op, DAG);

356 return LowerGlobalAddress(Op, DAG);

358 return LowerConstantPool(Op, DAG);

360 return LowerBlockAddress(Op, DAG);

362 return LowerSELECT_CC(Op, DAG);

365 return LowerSDIVSREM(Op, DAG);

366 case ISD::DYNAMIC_STACKALLOC:

367 return LowerDYNAMIC_STACKALLOC(Op, DAG);

368 case ISD::ATOMIC_LOAD:

369 case ISD::ATOMIC_STORE:

370 return LowerATOMIC_LOAD_STORE(Op, DAG);

371 case ISD::TRAP:

372 return LowerTRAP(Op, DAG);

373 }

374}

375

376

377#include "BPFGenCallingConv.inc"

378

379SDValue BPFTargetLowering::LowerFormalArguments(

383 switch (CallConv) {

384 default:

388 break;

389 }

390

393

394

396 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

397 CCInfo.AnalyzeFormalArguments(Ins, getHasAlu32() ? CC_BPF32 : CC_BPF64);

398

399 bool HasMemArgs = false;

400 for (size_t I = 0; I < ArgLocs.size(); ++I) {

401 auto &VA = ArgLocs[I];

402

403 if (VA.isRegLoc()) {

404

405 EVT RegVT = VA.getLocVT();

407 switch (SimpleTy) {

408 default: {

409 std::string Str;

410 {

413 }

415 }

416 case MVT::i32:

417 case MVT::i64:

419 SimpleTy == MVT::i64 ? &BPF::GPRRegClass : &BPF::GPR32RegClass);

420 RegInfo.addLiveIn(VA.getLocReg(), VReg);

422

423

424

431

434

436

437 break;

438 }

439 } else {

440 if (VA.isMemLoc())

441 HasMemArgs = true;

442 else

445 }

446 }

447 if (HasMemArgs)

448 fail(DL, DAG, "stack arguments are not supported");

449 if (IsVarArg)

450 fail(DL, DAG, "variadic functions are not supported");

452 fail(DL, DAG, "aggregate returns are not supported");

453

454 return Chain;

455}

456

457const size_t BPFTargetLowering::MaxArgs = 5;

458

464

467 const uint32_t *BaseRegMask) {

470 memcpy(RegMask, BaseRegMask, sizeof(RegMask[0]) * RegMaskSize);

471 return RegMask;

472}

473

476 SelectionDAG &DAG = CLI.DAG;

477 auto &Outs = CLI.Outs;

478 auto &OutVals = CLI.OutVals;

479 auto &Ins = CLI.Ins;

484 bool IsVarArg = CLI.IsVarArg;

486

487

488 IsTailCall = false;

489

490 switch (CallConv) {

491 default:

492 report_fatal_error("unsupported calling convention: " + Twine(CallConv));

495 break;

496 }

497

498

500 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

501

502 CCInfo.AnalyzeCallOperands(Outs, getHasAlu32() ? CC_BPF32 : CC_BPF64);

503

504 unsigned NumBytes = CCInfo.getStackSize();

505

506 if (Outs.size() > MaxArgs)

507 fail(CLI.DL, DAG, "too many arguments", Callee);

508

509 for (auto &Arg : Outs) {

510 ISD::ArgFlagsTy Flags = Arg.Flags;

511 if (Flags.isByVal())

512 continue;

513 fail(CLI.DL, DAG, "pass by value not supported", Callee);

514 break;

515 }

516

519

521

522

523 for (size_t i = 0; i < std::min(ArgLocs.size(), MaxArgs); ++i) {

524 CCValAssign &VA = ArgLocs[i];

525 SDValue &Arg = OutVals[i];

526

527

529 default:

532 break;

535 break;

538 break;

541 break;

542 }

543

544

547 else

549 }

550

552

553

554

555

556 for (auto &Reg : RegsToPass) {

559 }

560

561

562

563

566 G->getOffset(), 0);

568 if (StringRef(E->getSymbol()) != BPF_TRAP) {

571 Twine("A call to built-in function '" + StringRef(E->getSymbol()) +

572 "' is not supported."));

573 }

574 }

575

576

577 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

579 Ops.push_back(Chain);

580 Ops.push_back(Callee);

581

582

583

584 for (auto &Reg : RegsToPass)

586

587 bool HasFastCall =

590 if (HasFastCall) {

593 for (auto const &RegPair : RegsToPass)

598 } else {

599 Ops.push_back(

601 }

602

604 Ops.push_back(InGlue);

605

606 Chain = DAG.getNode(BPFISD::CALL, CLI.DL, NodeTys, Ops);

608

610

611

612 Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, CLI.DL);

614

615

616

617 return LowerCallResult(Chain, InGlue, CallConv, IsVarArg, Ins, CLI.DL, DAG,

618 InVals);

619}

620

623 bool IsVarArg,

627 unsigned Opc = BPFISD::RET_GLUE;

628

629

632

633

634 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());

635

637 fail(DL, DAG, "aggregate returns are not supported");

638 return DAG.getNode(Opc, DL, MVT::Other, Chain);

639 }

640

641

642 CCInfo.AnalyzeReturn(Outs, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);

643

646

647

648 for (size_t i = 0; i != RVLocs.size(); ++i) {

649 CCValAssign &VA = RVLocs[i];

652

654

655

656

659 }

660

661 RetOps[0] = Chain;

662

663

665 RetOps.push_back(Glue);

666

667 return DAG.getNode(Opc, DL, MVT::Other, RetOps);

668}

669

670SDValue BPFTargetLowering::LowerCallResult(

674

676

678 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());

679

680 if (Ins.size() > 1) {

681 fail(DL, DAG, "only small returns supported");

682 for (auto &In : Ins)

685 }

686

687 CCInfo.AnalyzeCallResult(Ins, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);

688

689

690 for (auto &Val : RVLocs) {

692 Val.getValVT(), InGlue).getValue(1);

695 }

696

697 return Chain;

698}

699

701 switch (CC) {

702 default:

703 break;

710 break;

711 }

712}

713

715 SDLoc DL(Op);

717 "unsupported signed division, please convert to unsigned div/mod.");

718 return DAG.getUNDEF(Op->getValueType(0));

719}

720

721SDValue BPFTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,

723 SDLoc DL(Op);

724 fail(DL, DAG, "unsupported dynamic stack allocation");

725 auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};

727}

728

730 SDValue Chain = Op.getOperand(0);

735 SDLoc DL(Op);

736

739

740 return DAG.getNode(BPFISD::BR_CC, DL, Op.getValueType(), Chain, LHS, RHS,

742}

743

747 SDValue TrueV = Op.getOperand(2);

748 SDValue FalseV = Op.getOperand(3);

750 SDLoc DL(Op);

751

754

757

758 return DAG.getNode(BPFISD::SELECT_CC, DL, Op.getValueType(), Ops);

759}

760

763 SDNode *N = Op.getNode();

764 SDLoc DL(N);

765

769 "sequentially consistent (seq_cst) "

770 "atomic load/store is not supported");

771

772 return Op;

773}

774

776 if (auto *Fn = M->getFunction(BPF_TRAP))

777 return Fn;

778

785

786 if (M->debug_compile_units().empty())

787 return NewF;

788

796 DINode::FlagZero, DISubprogram::SPFlagZero);

798 return NewF;

799}

800

803 TargetLowering::CallLoweringInfo CLI(DAG);

805 SDNode *N = Op.getNode();

806 SDLoc DL(N);

807

818 return LowerCall(CLI, InVals);

819}

820

823 return getAddr(N, DAG);

824}

825

829 N->getOffset(), Flags);

830}

831

836

837template

839 unsigned Flags) const {

840 SDLoc DL(N);

841

843

844 return DAG.getNode(BPFISD::Wrapper, DL, MVT::i64, GA);

845}

846

850 if (N->getOffset() != 0)

852 Twine(N->getOffset()));

853

854 const GlobalValue *GVal = N->getGlobal();

855 SDLoc DL(Op);

856

857

859

860

862}

863

867

868 return getAddr(N, DAG);

869}

870

874 SDLoc DL(Op);

875

876

878

879

881}

882

883unsigned

887 const TargetRegisterClass *RC = getRegClassFor(MVT::i64);

888 int RShiftOp = isSigned ? BPF::SRA_ri : BPF::SRL_ri;

889 MachineFunction *F = BB->getParent();

891

892 MachineRegisterInfo &RegInfo = F->getRegInfo();

893

897 return PromotedReg0;

898 }

902 if (HasMovsx) {

904 } else {

910 }

911

912 return PromotedReg2;

913}

914

916BPFTargetLowering::EmitInstrWithCustomInserterMemcpy(MachineInstr &MI,

918 const {

919 MachineFunction *MF = MI.getParent()->getParent();

921 MachineInstrBuilder MIB(*MF, MI);

922 unsigned ScratchReg;

923

924

925

926

927

928

929

930

931

932

933

934

935

936

937

938

939 ScratchReg = MRI.createVirtualRegister(&BPF::GPRRegClass);

940 MIB.addReg(ScratchReg,

942

943 return BB;

944}

945

946MachineBasicBlock *BPFTargetLowering::EmitInstrWithCustomInserterLDimm64(

948 MachineFunction *MF = BB->getParent();

949 const BPFInstrInfo *TII = MF->getSubtarget().getInstrInfo();

950 const TargetRegisterClass *RC = getRegClassFor(MVT::i64);

951 MachineRegisterInfo &RegInfo = MF->getRegInfo();

953

954

955 DenseMap<const BasicBlock *, MachineBasicBlock *> AddressTakenBBs;

956 for (MachineBasicBlock &MBB : *MF) {

959 AddressTakenBBs[BB] = &MBB;

960 }

961

962 MachineOperand &MO = MI.getOperand(1);

964

965 MCRegister ResultReg = MI.getOperand(0).getReg();

967

968 std::vector<MachineBasicBlock *> Targets;

969 unsigned JTI;

970

973 MachineBasicBlock *TgtMBB = AddressTakenBBs[BA->getBasicBlock()];

975

976 Targets.push_back(TgtMBB);

978 ->createJumpTableIndex(Targets);

979

985 MI.eraseFromParent();

986 return BB;

987 }

988

989

990 auto emitLDImm64 = [&](const GlobalValue *GV = nullptr, unsigned JTI = -1) {

991 auto MIB = BuildMI(*BB, MI, DL, TII->get(BPF::LD_imm64), ResultReg);

992 if (GV)

993 MIB.addGlobalAddress(GV);

994 else

995 MIB.addJumpTableIndex(JTI);

996 MI.eraseFromParent();

997 return BB;

998 };

999

1000

1001 const GlobalValue *GVal = MO.getGlobal();

1003

1005 !GV->isConstant() || !GV->hasInitializer())

1006 return emitLDImm64(GVal);

1007

1009 if (!CA)

1010 return emitLDImm64(GVal);

1011

1012 for (const Use &Op : CA->operands()) {

1014 return emitLDImm64(GVal);

1016 MachineBasicBlock *TgtMBB = AddressTakenBBs[BA->getBasicBlock()];

1018 Targets.push_back(TgtMBB);

1019 }

1020

1022 ->createJumpTableIndex(Targets);

1023 return emitLDImm64(nullptr, JTI);

1024}

1025

1031 unsigned Opc = MI.getOpcode();

1032 bool isSelectRROp = (Opc == BPF::Select ||

1033 Opc == BPF::Select_64_32 ||

1034 Opc == BPF::Select_32 ||

1035 Opc == BPF::Select_32_64);

1036

1037 bool isMemcpyOp = Opc == BPF::MEMCPY;

1038 bool isLDimm64Op = Opc == BPF::LDIMM64;

1039

1040#ifndef NDEBUG

1041 bool isSelectRIOp = (Opc == BPF::Select_Ri ||

1042 Opc == BPF::Select_Ri_64_32 ||

1043 Opc == BPF::Select_Ri_32 ||

1044 Opc == BPF::Select_Ri_32_64);

1045

1046 if (!(isSelectRROp || isSelectRIOp || isMemcpyOp || isLDimm64Op))

1048#endif

1049

1050 if (isMemcpyOp)

1051 return EmitInstrWithCustomInserterMemcpy(MI, BB);

1052

1053 if (isLDimm64Op)

1054 return EmitInstrWithCustomInserterLDimm64(MI, BB);

1055

1056 bool is32BitCmp = (Opc == BPF::Select_32 ||

1057 Opc == BPF::Select_32_64 ||

1058 Opc == BPF::Select_Ri_32 ||

1059 Opc == BPF::Select_Ri_32_64);

1060

1061

1062

1063

1064

1067

1068

1069

1070

1071

1072

1077

1078 F->insert(I, Copy0MBB);

1079 F->insert(I, Copy1MBB);

1080

1081

1085

1088

1089

1090 int CC = MI.getOperand(3).getImm();

1091 int NewCC;

1092 switch (CC) {

1093#define SET_NEWCC(X, Y) \

1094 case ISD::X: \

1095 if (is32BitCmp && HasJmp32) \

1096 NewCC = isSelectRROp ? BPF::Y##_rr_32 : BPF::Y##_ri_32; \

1097 else \

1098 NewCC = isSelectRROp ? BPF::Y##_rr : BPF::Y##_ri; \

1099 break

1110 default:

1112 }

1113

1114 Register LHS = MI.getOperand(1).getReg();

1115 bool isSignedCmp = (CC == ISD::SETGT ||

1119

1120

1121

1122

1123

1124

1125

1126

1127 if (is32BitCmp && !HasJmp32)

1128 LHS = EmitSubregExt(MI, BB, LHS, isSignedCmp);

1129

1130 if (isSelectRROp) {

1131 Register RHS = MI.getOperand(2).getReg();

1132

1133 if (is32BitCmp && !HasJmp32)

1134 RHS = EmitSubregExt(MI, BB, RHS, isSignedCmp);

1135

1137 } else {

1138 int64_t imm32 = MI.getOperand(2).getImm();

1139

1144 }

1145

1146

1147

1148

1149 BB = Copy0MBB;

1150

1151

1153

1154

1155

1156

1157 BB = Copy1MBB;

1158 BuildMI(*BB, BB->begin(), DL, TII.get(BPF::PHI), MI.getOperand(0).getReg())

1159 .addReg(MI.getOperand(5).getReg())

1161 .addReg(MI.getOperand(4).getReg())

1163

1164 MI.eraseFromParent();

1165 return BB;

1166}

1167

1169 EVT VT) const {

1170 return getHasAlu32() ? MVT::i32 : MVT::i64;

1171}

1172

1174 EVT VT) const {

1175 return (getHasAlu32() && VT == MVT::i32) ? MVT::i32 : MVT::i64;

1176}

1177

1178bool BPFTargetLowering::isLegalAddressingMode(const DataLayout &DL,

1180 unsigned AS,

1182

1183 if (AM.BaseGV)

1184 return false;

1185

1186 switch (AM.Scale) {

1187 case 0:

1188 break;

1189 case 1:

1190 if (!AM.HasBaseReg)

1191 break;

1192 return false;

1193 default:

1194 return false;

1195 }

1196

1197 return true;

1198}

unsigned const MachineRegisterInfo * MRI

assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")

const TargetInstrInfo & TII

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

Function Alias Analysis Results

static uint32_t * regMaskFromTemplate(const TargetRegisterInfo *TRI, MachineFunction &MF, const uint32_t *BaseRegMask)

Definition BPFISelLowering.cpp:465

static Function * createBPFUnreachable(Module *M)

Definition BPFISelLowering.cpp:775

static SDValue getTargetNode(ConstantPoolSDNode *N, const SDLoc &DL, EVT Ty, SelectionDAG &DAG, unsigned Flags)

Definition BPFISelLowering.cpp:826

static cl::opt< bool > BPFExpandMemcpyInOrder("bpf-expand-memcpy-in-order", cl::Hidden, cl::init(false), cl::desc("Expand memcpy into load/store pairs in order"))

static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg, SDValue Val={})

Definition BPFISelLowering.cpp:46

static cl::opt< unsigned > BPFMinimumJumpTableEntries("bpf-min-jump-table-entries", cl::init(13), cl::Hidden, cl::desc("Set minimum number of entries to use a jump table on BPF"))

static void resetRegMaskBit(const TargetRegisterInfo *TRI, uint32_t *RegMask, MCRegister Reg)

Definition BPFISelLowering.cpp:459

static void NegateCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC)

Definition BPFISelLowering.cpp:700

static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")

static bool isSigned(unsigned int Opcode)

Module.h This file contains the declarations for the Module class.

const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]

Register const TargetRegisterInfo * TRI

Promote Memory to Register

unsigned getCommonMaxStoresPerMemFunc() const

bool getAllowsMisalignedMemAccess() const

bool getHasJmpExt() const

const BPFSelectionDAGInfo * getSelectionDAGInfo() const override

const BPFRegisterInfo * getRegisterInfo() const override

BPFTargetLowering::ConstraintType getConstraintType(StringRef Constraint) const override

Given a constraint, return the type of constraint it is for this target.

Definition BPFISelLowering.cpp:284

unsigned getJumpTableEncoding() const override

Return the entry encoding for a jump table in the current function.

Definition BPFISelLowering.cpp:279

bool getHasJmpExt() const

bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override

Return true if folding a constant offset with the given GlobalAddress is legal.

Definition BPFISelLowering.cpp:231

bool allowsMisalignedMemoryAccesses(EVT VT, unsigned, Align, MachineMemOperand::Flags, unsigned *) const override

Determine if the target supports unaligned memory accesses.

Definition BPFISelLowering.cpp:213

EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override

Return the ValueType of the result of SETCC operations.

Definition BPFISelLowering.cpp:1168

BPFTargetLowering(const TargetMachine &TM, const BPFSubtarget &STI)

Definition BPFISelLowering.cpp:59

SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override

This callback is invoked for operations that are unsupported by the target, which are registered to u...

Definition BPFISelLowering.cpp:347

MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override

This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...

Definition BPFISelLowering.cpp:1027

MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override

Return the type to use for a scalar shift opcode, given the shifted amount type.

Definition BPFISelLowering.cpp:1173

std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override

Given a physical register constraint (e.g.

Definition BPFISelLowering.cpp:298

LLVM Basic Block Representation.

BasicBlock * getBasicBlock() const

CCState - This class holds information needed while lowering arguments and return values.

Register getLocReg() const

LocInfo getLocInfo() const

bool hasFnAttr(Attribute::AttrKind Kind) const

Determine whether this call has the given attribute.

LLVM_ABI DISubroutineType * createSubroutineType(DITypeRefArray ParameterTypes, DINode::DIFlags Flags=DINode::FlagZero, unsigned CC=0)

Create subroutine type.

LLVM_ABI DISubprogram * createFunction(DIScope *Scope, StringRef Name, StringRef LinkageName, DIFile *File, unsigned LineNo, DISubroutineType *Ty, unsigned ScopeLine, DINode::DIFlags Flags=DINode::FlagZero, DISubprogram::DISPFlags SPFlags=DISubprogram::SPFlagZero, DITemplateParameterArray TParams=nullptr, DISubprogram *Decl=nullptr, DITypeArray ThrownTypes=nullptr, DINodeArray Annotations=nullptr, StringRef TargetFuncName="", bool UseKeyInstructions=false)

Create a new descriptor for the specified subprogram.

LLVM_ABI DITypeRefArray getOrCreateTypeArray(ArrayRef< Metadata * > Elements)

Get a DITypeRefArray, create one if required.

Subprogram description. Uses SubclassData1.

Type array for a subprogram.

A parsed version of the target data layout string in and methods for querying it.

Diagnostic information for unsupported feature in backend.

static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)

This static method is the primary way of constructing a FunctionType.

void setSubprogram(DISubprogram *SP)

Set the attached subprogram.

static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)

bool hasStructRetAttr() const

Determine if the function returns a structure through first or second pointer argument.

Type * getReturnType() const

Returns the type of the ret val.

void setCallingConv(CallingConv::ID CC)

LLVM_ABI void setSection(StringRef S)

Change the section for this global.

LinkageTypes getLinkage() const

Module * getParent()

Get the module that this global value is contained inside of...

void setDSOLocal(bool Local)

@ PrivateLinkage

Like Internal, but omit from symbol table.

@ ExternalWeakLinkage

ExternalWeak linkage description.

This is an important class for using LLVM in a threaded context.

LLVM_ABI void diagnose(const DiagnosticInfo &DI)

Report a message to the currently installed diagnostic handler.

const MCInstrDesc & get(unsigned Opcode) const

Return the machine instruction descriptor that corresponds to the specified instruction opcode.

Wrapper class representing physical registers. Should be passed by value.

static auto integer_valuetypes()

LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)

Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...

const BasicBlock * getBasicBlock() const

Return the LLVM basic block that this instance corresponded to originally.

bool hasAddressTaken() const

Test whether this block is used as something other than the target of a terminator,...

LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())

Add Succ as a successor of this MachineBasicBlock.

const MachineFunction * getParent() const

Return the MachineFunction containing this basic block.

void splice(iterator Where, MachineBasicBlock *Other, iterator From)

Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...

MachineInstrBundleIterator< MachineInstr > iterator

const TargetSubtargetInfo & getSubtarget() const

getSubtarget - Return the subtarget for which this machine code is being compiled.

uint32_t * allocateRegMask()

Allocate and initialize a register mask with NumRegister bits.

MachineRegisterInfo & getRegInfo()

getRegInfo - Return information about the registers currently in use.

const DataLayout & getDataLayout() const

Return the DataLayout attached to the Module associated to this MF.

Function & getFunction()

Return the LLVM function that this machine code represents.

BasicBlockListType::iterator iterator

const MachineInstrBuilder & addImm(int64_t Val) const

Add a new immediate operand.

const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const

Add a new virtual register operand.

const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const

const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const

Representation of each machine instruction.

@ EK_BlockAddress

EK_BlockAddress - Each entry is a plain address of block, e.g.: .word LBB123.

Flags

Flags values. These may be or'd together.

const GlobalValue * getGlobal() const

const BlockAddress * getBlockAddress() const

static unsigned getRegMaskSize(unsigned NumRegs)

Returns number of elements needed for a regmask array.

bool isGlobal() const

isGlobal - Tests if this is a MO_GlobalAddress operand.

bool isBlockAddress() const

isBlockAddress - Tests if this is a MO_BlockAddress operand.

MachineRegisterInfo - Keep track of information for virtual and physical registers,...

LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")

createVirtualRegister - Create and return a new virtual register in the function with the specified r...

void addLiveIn(MCRegister Reg, Register vreg=Register())

addLiveIn - Add the specified register as a live-in.

A Module instance is used to store all the information related to an LLVM module.

Wrapper class representing virtual and physical registers.

Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...

Represents one node in the SelectionDAG.

const SDValue & getOperand(unsigned Num) const

Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.

SDNode * getNode() const

get the SDNode which holds the desired result

SDValue getValue(unsigned R) const

EVT getValueType() const

Return the ValueType of the referenced return value.

unsigned getOpcode() const

This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...

SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)

SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)

LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)

Create a MERGE_VALUES node from the given operands.

LLVM_ABI SDVTList getVTList(EVT VT)

Return an SDVTList that represents the list of values specified.

LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)

These are used for target selectors to create a new node with specified return type(s),...

LLVM_ABI SDValue getRegister(Register Reg, EVT VT)

void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)

Set NoMergeSiteInfo to be associated with Node if NoMerge is true.

SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)

SDValue getUNDEF(EVT VT)

Return an UNDEF node. UNDEF does not have a useful SDLoc.

SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)

Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).

SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)

LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)

Create a ConstantSDNode wrapping a constant value.

SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)

Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...

LLVM_ABI SDValue getValueType(EVT)

LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)

Gets or creates the specified node.

SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)

MachineFunction & getMachineFunction() const

LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)

LLVMContext * getContext() const

LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)

SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

StringRef - Represent a constant reference to a string, i.e.

constexpr size_t size() const

size - Get the string size.

TargetInstrInfo - Interface to description of machine instruction set.

void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)

Indicate that the specified operation does not work with the specified type and indicate what to do a...

unsigned MaxStoresPerMemcpyOptSize

Likewise for functions with the OptSize attribute.

virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const

Return the register class that should be used for the specified value type.

unsigned MaxLoadsPerMemcmp

Specify maximum number of load instructions per memcmp call.

virtual bool isZExtFree(Type *FromTy, Type *ToTy) const

Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...

void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)

Set the maximum atomic operation size supported by the backend.

void setMinFunctionAlignment(Align Alignment)

Set the target's minimum function alignment.

unsigned MaxStoresPerMemsetOptSize

Likewise for functions with the OptSize attribute.

void setBooleanContents(BooleanContent Ty)

Specify how the target extends the result of integer and floating point boolean values from i1 to a w...

unsigned MaxStoresPerMemmove

Specify maximum number of store instructions per memmove call.

void computeRegisterProperties(const TargetRegisterInfo *TRI)

Once all of the register classes are added, this allows us to compute derived properties we expose.

unsigned MaxStoresPerMemmoveOptSize

Likewise for functions with the OptSize attribute.

void addRegisterClass(MVT VT, const TargetRegisterClass *RC)

Add the specified register class as an available regclass for the specified value type.

virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const

Return the pointer type for the given address space, defaults to the pointer type from the data layou...

void setPrefFunctionAlignment(Align Alignment)

Set the target's preferred function alignment.

unsigned MaxStoresPerMemset

Specify maximum number of store instructions per memset call.

void setMinimumJumpTableEntries(unsigned Val)

Indicate the minimum number of blocks to generate jump tables.

@ ZeroOrOneBooleanContent

unsigned MaxLoadsPerMemcmpOptSize

Likewise for functions with the OptSize attribute.

void setStackPointerRegisterToSaveRestore(Register R)

If set to a physical register, this specifies the register that llvm.savestack/llvm....

void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)

Indicate that the specified load with extension does not work with the specified type and indicate wh...

unsigned MaxStoresPerMemcpy

Specify maximum number of store instructions per memcpy call.

virtual ConstraintType getConstraintType(StringRef Constraint) const

Given a constraint, return the type of constraint it is for this target.

virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const

Given a physical register constraint (e.g.

TargetLowering(const TargetLowering &)=delete

Primary interface to the complete machine description for the target machine.

TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...

virtual const TargetInstrInfo * getInstrInfo() const

virtual const TargetRegisterInfo * getRegisterInfo() const =0

Return the target's register information.

Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...

The instances of the Type class are immutable: once they are created, they are never changed.

static LLVM_ABI Type * getVoidTy(LLVMContext &C)

LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY

Return the basic size of this type if it is a primitive type.

bool isAggregateType() const

Return true if the type is an aggregate type.

bool isIntegerTy() const

True if this is an instance of IntegerType.

bool isVoidTy() const

Return true if this is 'void'.

Type * getType() const

All values are typed, get the type of this value.

self_iterator getIterator()

A raw_ostream that writes to an std::string.

unsigned ID

LLVM IR allows arbitrary numbers to be used as calling convention identifiers.

@ PreserveAll

Used for runtime calls that preserves (almost) all registers.

@ Fast

Attempts to make calls as fast as possible (e.g. by passing things in registers).

@ C

The default llvm calling convention, compatible with C.

@ SETCC

SetCC operator - This evaluates to a true value iff the condition is true.

@ SMUL_LOHI

SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results, each of type iN.

@ BSWAP

Byte Swap and Counting operators.

@ ANY_EXTEND

ANY_EXTEND - Used for integer types. The high bits are undefined.

@ SDIVREM

SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.

@ SIGN_EXTEND

Conversion operators.

@ CTTZ_ZERO_UNDEF

Bit counting operators with an undefined result for zero inputs.

@ SELECT

Select(COND, TRUEVAL, FALSEVAL).

@ MULHU

MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of type iN, returning the top half of the result.

@ ZERO_EXTEND

ZERO_EXTEND - Used for integer types, zeroing the new bits.

@ SELECT_CC

Select with condition operator - This selects between a true value and a false value (ops #2 and #3) based on the boolean result of comparing the lhs and rhs (ops #0 and #1) of a conditional expression with the condition code in op #4, a CondCodeSDNode.

@ SIGN_EXTEND_INREG

SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in a large integer register (e.g. sign extending the low 8 bits of a 32-bit register to fill the top 24 bits with the sign bit).

@ TRUNCATE

TRUNCATE - Completely drop the high bits.

@ SHL_PARTS

SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.

@ AssertSext

AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero or sign extended from a narrower type.

LLVM_ABI CondCode getSetCCSwappedOperands(CondCode Operation)

Return the operation corresponding to (Y op X) when given the operation for (X op Y).

CondCode

ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...

@ Define

Register definition.

@ EarlyClobber

Register definition happens before uses.

initializer< Ty > init(const Ty &Val)

This is an optimization pass for GlobalISel generic memory operations.

MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)

Builder interface. Specify how to create the initial instruction itself.

constexpr bool isInt(int64_t x)

Checks if an integer fits into the given bit width.

decltype(auto) dyn_cast(const From &Val)

dyn_cast - Return the argument parameter cast to the specified type.

detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)

Returns a concatenated range across two or more ranges.

LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)

class LLVM_GSL_OWNER SmallVector

Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector&lt;T, 0&gt;).

bool isa(const From &Val)

isa - Return true if the parameter to the template is an instance of one of the template type arguments.

uint16_t MCPhysReg

An unsigned integer type large enough to represent all physical registers, but not necessarily virtual registers.

DWARFExpression::Operation Op

decltype(auto) cast(const From &Val)

cast - Return the argument parameter cast to the specified type.

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

This struct is a compact representation of a valid (non-zero power of two) alignment.

bool isSimple() const

Test if the given EVT is simple (as opposed to being extended).

TypeSize getSizeInBits() const

Return the size of the specified value type in bits.

MVT getSimpleVT() const

Return the SimpleValueType held in the specified simple EVT.

void print(raw_ostream &OS) const

Implement operator<<.

bool isInteger() const

Return true if this is an integer or a vector integer type.

This structure contains all information that is necessary for lowering calls.

SmallVector< ISD::InputArg, 32 > Ins

SmallVector< ISD::OutputArg, 32 > Outs

SmallVector< SDValue, 32 > OutVals