LLVM: lib/CodeGen/GlobalISel/Utils.cpp Source File


38#include

39#include

40

41#define DEBUG_TYPE "globalisel-utils"

42

43using namespace llvm;

44using namespace MIPatternMatch;

45

51 return MRI.createVirtualRegister(&RegClass);

52

53 return Reg;

54}
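
A minimal usage sketch for constrainRegToClass, assuming llvm/CodeGen/GlobalISel/Utils.h and MachineIRBuilder.h are in scope and the llvm namespace is in use; the helper name and register class are illustrative, not part of this file:

// If Reg cannot be constrained in place, constrainRegToClass hands back a
// fresh vreg of the requested class; the caller is expected to bridge the
// two registers with a COPY.
Register useInClass(MachineIRBuilder &MIB, MachineRegisterInfo &MRI,
                    const TargetInstrInfo &TII, const RegisterBankInfo &RBI,
                    Register Reg, const TargetRegisterClass &RC) {
  Register Constrained = constrainRegToClass(MRI, TII, RBI, Reg, RC);
  if (Constrained != Reg)
    MIB.buildCopy(Constrained, Reg);
  return Constrained;
}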

55

62

63 assert(Reg.isVirtual() && "PhysReg not implemented");

64

65

66

67

68

69 auto *OldRegClass = MRI.getRegClassOrNull(Reg);

71

72

73 if (ConstrainedReg != Reg) {

76

77

78 if (RegMO.isUse()) {

80 TII.get(TargetOpcode::COPY), ConstrainedReg)

82 } else {

83 assert(RegMO.isDef() && "Must be a definition");

85 TII.get(TargetOpcode::COPY), Reg)

86 .addReg(ConstrainedReg);

87 }

89 Observer->changingInstr(*RegMO.getParent());

90 }

91 RegMO.setReg(ConstrainedReg);

93 Observer->changedInstr(*RegMO.getParent());

94 }

95 } else if (OldRegClass != MRI.getRegClassOrNull(Reg)) {

97 if (!RegMO.isDef()) {

99 Observer->changedInstr(*RegDef);

100 }

101 Observer->changingAllUsesOfReg(MRI, Reg);

102 Observer->finishedChangingAllUsesOfReg();

103 }

104 }

105 return ConstrainedReg;

106}

107

114

115 assert(Reg.isVirtual() && "PhysReg not implemented");

116

118

119

120

121

122

123 if (OpRC) {

124

125

126

127

128 if (const auto *SubRC = TRI.getCommonSubClass(

129 OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))

130 OpRC = SubRC;

131

132 OpRC = TRI.getAllocatableClass(OpRC);

133 }

134

135 if (!OpRC) {

137 "Register class constraint is required unless either the "

138 "instruction is target independent or the operand is a use");

139

140

141

142

143

144

145

146

147

148

149 return Reg;

150 }

152 RegMO);

153}

154

160 "A selected instruction is expected");

164

165 for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {

167

168

170 continue;

171

172 LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');

173 assert(MO.isReg() && "Unsupported non-reg operand");

174

176

177 if (Reg.isPhysical())

178 continue;

179

180

181

182 if (Reg == 0)

183 continue;

184

185

186

187

189

190

191

192 if (MO.isUse()) {

193 int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);

194 if (DefIdx != -1 && I.isRegTiedToUseOperand(DefIdx))

195 I.tieOperands(DefIdx, OpI);

196 }

197 }

198 return true;

199}
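
Target instruction selectors usually call constrainSelectedInstRegOperands right after rewriting a generic opcode into a target opcode. A hedged sketch; TargetAddOpcode is a placeholder for a real target opcode, not something defined here:

// Mutate the instruction in place, then let the helper constrain every
// explicit register operand (inserting COPYs and re-tying operands as needed).
bool finishSelection(MachineInstr &I, unsigned TargetAddOpcode,
                     const TargetInstrInfo &TII, const TargetRegisterInfo &TRI,
                     const RegisterBankInfo &RBI) {
  I.setDesc(TII.get(TargetAddOpcode));
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}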

200

203

205 return false;

206

207 if (MRI.getType(DstReg) != MRI.getType(SrcReg))

208 return false;

209

210

211 const auto &DstRBC = MRI.getRegClassOrRegBank(DstReg);

212 if (!DstRBC || DstRBC == MRI.getRegClassOrRegBank(SrcReg))

213 return true;

214

215

216

217 return isa<const RegisterBank *>(DstRBC) && MRI.getRegClassOrNull(SrcReg) &&

218 cast<const RegisterBank *>(DstRBC)->covers(

219 *MRI.getRegClassOrNull(SrcReg));

220}

221

224

225

226

227 for (const auto &MO : MI.all_defs()) {

229 if (Reg.isPhysical() || MRI.use_nodbg_empty(Reg))

230 return false;

231 }

232 return MI.wouldBeTriviallyDead();

233}
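
A common caller pattern pairs isTriviallyDead with the eraseInstr helper defined later in this file, which also salvages debug users before deleting. A short sketch under that assumption:

// Erase MI only when all of its virtual-register defs are unused and the
// instruction has no other side effects.
void eraseIfDead(MachineInstr &MI, MachineRegisterInfo &MRI) {
  if (isTriviallyDead(MI, MRI))
    eraseInstr(MI, MRI, /*LocObserver=*/nullptr);
}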

234

240 bool IsFatal = Severity == DS_Error &&

242

243

244 if (!R.getLocation().isValid() || IsFatal)

245 R << (" (in function: " + MF.getName() + ")").str();

246

247 if (IsFatal)

249 else

250 MORE.emit(R);

251}

252

257}

258

262 MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

264}

265

271 MI.getDebugLoc(), MI.getParent());

272 R << Msg;

273

277}

278

280 switch (MinMaxOpc) {

281 case TargetOpcode::G_SMIN:

282 return TargetOpcode::G_SMAX;

283 case TargetOpcode::G_SMAX:

284 return TargetOpcode::G_SMIN;

285 case TargetOpcode::G_UMIN:

286 return TargetOpcode::G_UMAX;

287 case TargetOpcode::G_UMAX:

288 return TargetOpcode::G_UMIN;

289 default:

291 }

292}

293

297 VReg, MRI, false);

298 assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&

299 "Value found while looking through instrs");

300 if (!ValAndVReg)

301 return std::nullopt;

302 return ValAndVReg->Value;

303}

304

308 assert((Const && Const->getOpcode() == TargetOpcode::G_CONSTANT) &&

309 "expected a G_CONSTANT on Reg");

310 return Const->getOperand(1).getCImm()->getValue();

311}

312

313std::optional<int64_t>

316 if (Val && Val->getBitWidth() <= 64)

317 return Val->getSExtValue();

318 return std::nullopt;

319}
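
A sketch of how a combine might use these accessors; the helper name is illustrative:

// True when Reg is directly defined by a G_CONSTANT equal to zero.
// getIConstantVRegVal only inspects a direct G_CONSTANT def; the SExtVal
// variant additionally narrows the value to int64_t when it fits.
bool isConstantZero(Register Reg, const MachineRegisterInfo &MRI) {
  if (std::optional<APInt> Cst = getIConstantVRegVal(Reg, MRI))
    return Cst->isZero();
  return false;
}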

320

321namespace {

322

323

324

325

326

327

328

329

330

331

332

335std::optional<ValueAndVReg>

337 bool LookThroughInstrs = true,

338 bool LookThroughAnyExt = false) {

341

342 while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&

343 LookThroughInstrs) {

344 switch (MI->getOpcode()) {

345 case TargetOpcode::G_ANYEXT:

346 if (!LookThroughAnyExt)

347 return std::nullopt;

348 [[fallthrough]];

349 case TargetOpcode::G_TRUNC:

350 case TargetOpcode::G_SEXT:

351 case TargetOpcode::G_ZEXT:

352 SeenOpcodes.push_back(std::make_pair(

353 MI->getOpcode(),

354 MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));

355 VReg = MI->getOperand(1).getReg();

356 break;

357 case TargetOpcode::COPY:

358 VReg = MI->getOperand(1).getReg();

360 return std::nullopt;

361 break;

362 case TargetOpcode::G_INTTOPTR:

363 VReg = MI->getOperand(1).getReg();

364 break;

365 default:

366 return std::nullopt;

367 }

368 }

369 if (!MI || !IsConstantOpcode(MI))

370 return std::nullopt;

371

373 if (!GetAPCstValue(MI, Val))

374 return std::nullopt;

375 for (auto &Pair : reverse(SeenOpcodes)) {

376 switch (Pair.first) {

377 case TargetOpcode::G_TRUNC:

378 Val = Val.trunc(Pair.second);

379 break;

380 case TargetOpcode::G_ANYEXT:

381 case TargetOpcode::G_SEXT:

382 Val = Val.sext(Pair.second);

383 break;

384 case TargetOpcode::G_ZEXT:

385 Val = Val.zext(Pair.second);

386 break;

387 }

388 }

389

391}

392

394 if (!MI)

395 return false;

396 return MI->getOpcode() == TargetOpcode::G_CONSTANT;

397}

398

400 if (!MI)

401 return false;

402 return MI->getOpcode() == TargetOpcode::G_FCONSTANT;

403}

404

406 if (!MI)

407 return false;

408 unsigned Opc = MI->getOpcode();

409 return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;

410}

411

414 if (!CstVal.isCImm())

415 return false;

417 return true;

418}

419

424 else if (CstVal.isFPImm())

426 else

427 return false;

428 return true;

429}

430

431}

432

435 return getConstantVRegValWithLookThrough<isIConstant, getCImmAsAPInt>(

436 VReg, MRI, LookThroughInstrs);

437}

438

441 bool LookThroughAnyExt) {

442 return getConstantVRegValWithLookThrough<isAnyConstant,

443 getCImmOrFPImmAsAPInt>(

444 VReg, MRI, LookThroughInstrs, LookThroughAnyExt);

445}

446

449 auto Reg =

450 getConstantVRegValWithLookThrough<isFConstant, getCImmOrFPImmAsAPInt>(

451 VReg, MRI, LookThroughInstrs);

452 if (!Reg)

453 return std::nullopt;

455 Reg->VReg};

456}
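
The look-through variants walk COPY/trunc/ext chains back to the underlying G_CONSTANT or G_FCONSTANT. A sketch of the common "match a shift amount" use, with the register assumed to come from the calling combine:

// ValueAndVReg pairs the constant with the vreg that actually carries the
// G_CONSTANT, so callers can reuse that vreg instead of rebuilding it.
std::optional<int64_t> matchShiftAmount(Register Amt,
                                        const MachineRegisterInfo &MRI) {
  if (auto ValAndVReg = getIConstantVRegValWithLookThrough(Amt, MRI))
    return ValAndVReg->Value.getSExtValue();
  return std::nullopt;
}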

457

461 if (TargetOpcode::G_FCONSTANT != MI->getOpcode())

462 return nullptr;

463 return MI->getOperand(1).getFPImm();

464}

465

466std::optional<DefinitionAndSourceRegister>

469 auto *DefMI = MRI.getVRegDef(Reg);

471 if (!DstTy.isValid())

472 return std::nullopt;

476 auto SrcTy = MRI.getType(SrcReg);

477 if (!SrcTy.isValid())

478 break;

479 DefMI = MRI.getVRegDef(SrcReg);

480 DefSrcReg = SrcReg;

482 }

484}

485

488 std::optional<DefinitionAndSourceRegister> DefSrcReg =

490 return DefSrcReg ? DefSrcReg->MI : nullptr;

491}

492

495 std::optional<DefinitionAndSourceRegister> DefSrcReg =

497 return DefSrcReg ? DefSrcReg->Reg : Register();

498}

499

504 for (int i = 0; i < NumParts; ++i)

505 VRegs.push_back(MRI.createGenericVirtualRegister(Ty));

507}

508

514 assert(!LeftoverTy.isValid() && "this is an out argument");

515

518 unsigned NumParts = RegSize / MainSize;

519 unsigned LeftoverSize = RegSize - NumParts * MainSize;

520

521

522 if (LeftoverSize == 0) {

523 for (unsigned I = 0; I < NumParts; ++I)

524 VRegs.push_back(MRI.createGenericVirtualRegister(MainTy));

526 return true;

527 }

528

529

530

531

532

533

537 unsigned LeftoverNumElts = RegNumElts % MainNumElts;

538

539 if (MainNumElts % LeftoverNumElts == 0 &&

540 RegNumElts % LeftoverNumElts == 0 &&

542 LeftoverNumElts > 1) {

544

545

547 extractParts(Reg, LeftoverTy, RegNumElts / LeftoverNumElts, UnmergeValues,

548 MIRBuilder, MRI);

549

550

551 unsigned LeftoverPerMain = MainNumElts / LeftoverNumElts;

552 unsigned NumOfLeftoverVal =

553 ((RegNumElts % MainNumElts) / LeftoverNumElts);

554

555

557 for (unsigned I = 0; I < UnmergeValues.size() - NumOfLeftoverVal; I++) {

558 MergeValues.push_back(UnmergeValues[I]);

559 if (MergeValues.size() == LeftoverPerMain) {

562 MergeValues.clear();

563 }

564 }

565

566 for (unsigned I = UnmergeValues.size() - NumOfLeftoverVal;

567 I < UnmergeValues.size(); I++) {

568 LeftoverRegs.push_back(UnmergeValues[I]);

569 }

570 return true;

571 }

572 }

573

578 for (unsigned i = 0; i < RegPieces.size() - 1; ++i)

580 LeftoverRegs.push_back(RegPieces[RegPieces.size() - 1]);

581 LeftoverTy = MRI.getType(LeftoverRegs[0]);

582 return true;

583 }

584

586

587 for (unsigned I = 0; I != NumParts; ++I) {

588 Register NewReg = MRI.createGenericVirtualRegister(MainTy);

590 MIRBuilder.buildExtract(NewReg, Reg, MainSize * I);

591 }

592

594 Offset += LeftoverSize) {

595 Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy);

598 }

599

600 return true;

601}

602

607 LLT RegTy = MRI.getType(Reg);

608 assert(RegTy.isVector() && "Expected a vector type");

609

613 unsigned LeftoverNumElts = RegNumElts % NumElts;

614 unsigned NumNarrowTyPieces = RegNumElts / NumElts;

615

616

617 if (LeftoverNumElts == 0)

618 return extractParts(Reg, NarrowTy, NumNarrowTyPieces, VRegs, MIRBuilder,

620

621

622

623

625 extractParts(Reg, EltTy, RegNumElts, Elts, MIRBuilder, MRI);

626

628

629 for (unsigned i = 0; i < NumNarrowTyPieces; ++i, Offset += NumElts) {

632 }

633

634

635 if (LeftoverNumElts == 1) {

637 } else {

642 }

643}

644

649}

650

652 if (Size == 32)

653 return APFloat(float(Val));

654 if (Size == 64)

656 if (Size != 16)

658 bool Ignored;

660 APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);

661 return APF;

662}
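
getAPFloatFromSize maps a host double onto the IEEE semantics for a 16-, 32- or 64-bit scalar. A sketch of materializing such a constant, assuming a valid, positioned MachineIRBuilder and a scalar type of one of those widths:

// Build a G_FCONSTANT of the requested scalar width from a host double,
// reusing the same conversion the constant folder uses.
MachineInstrBuilder buildFPConst(MachineIRBuilder &MIB, LLT Ty, double Val) {
  APFloat APF = getAPFloatFromSize(Val, Ty.getScalarSizeInBits());
  return MIB.buildFConstant(Ty, APF);
}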

663

669 if (!MaybeOp2Cst)

670 return std::nullopt;

671

673 if (!MaybeOp1Cst)

674 return std::nullopt;

675

676 const APInt &C1 = MaybeOp1Cst->Value;

677 const APInt &C2 = MaybeOp2Cst->Value;

678 switch (Opcode) {

679 default:

680 break;

681 case TargetOpcode::G_ADD:

682 return C1 + C2;

683 case TargetOpcode::G_PTR_ADD:

684

685

687 case TargetOpcode::G_AND:

688 return C1 & C2;

689 case TargetOpcode::G_ASHR:

690 return C1.ashr(C2);

691 case TargetOpcode::G_LSHR:

692 return C1.lshr(C2);

693 case TargetOpcode::G_MUL:

694 return C1 * C2;

695 case TargetOpcode::G_OR:

696 return C1 | C2;

697 case TargetOpcode::G_SHL:

698 return C1 << C2;

699 case TargetOpcode::G_SUB:

700 return C1 - C2;

701 case TargetOpcode::G_XOR:

702 return C1 ^ C2;

703 case TargetOpcode::G_UDIV:

704 if (!C2.getBoolValue())

705 break;

706 return C1.udiv(C2);

707 case TargetOpcode::G_SDIV:

708 if (!C2.getBoolValue())

709 break;

710 return C1.sdiv(C2);

711 case TargetOpcode::G_UREM:

712 if (!C2.getBoolValue())

713 break;

714 return C1.urem(C2);

715 case TargetOpcode::G_SREM:

716 if (!C2.getBoolValue())

717 break;

718 return C1.srem(C2);

719 case TargetOpcode::G_SMIN:

721 case TargetOpcode::G_SMAX:

723 case TargetOpcode::G_UMIN:

725 case TargetOpcode::G_UMAX:

727 }

728

729 return std::nullopt;

730}
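
The typical caller tries the fold and only rewrites when a value comes back. A hedged sketch for an integer-valued binop, assuming MIB is positioned at MI (pointer-typed results such as G_PTR_ADD would need a different materialization):

// Fold e.g. a G_ADD whose operands are (chains to) G_CONSTANTs, then replace
// the instruction with a single G_CONSTANT.
bool tryFoldIntBinOp(MachineInstr &MI, MachineIRBuilder &MIB,
                     MachineRegisterInfo &MRI) {
  std::optional<APInt> Folded = ConstantFoldBinOp(
      MI.getOpcode(), MI.getOperand(1).getReg(), MI.getOperand(2).getReg(),
      MRI);
  if (!Folded)
    return false;
  MIB.buildConstant(MI.getOperand(0).getReg(), *Folded);
  MI.eraseFromParent();
  return true;
}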

731

732std::optional<APFloat>

736 if (!Op2Cst)

737 return std::nullopt;

738

740 if (!Op1Cst)

741 return std::nullopt;

742

745 switch (Opcode) {

746 case TargetOpcode::G_FADD:

747 C1.add(C2, APFloat::rmNearestTiesToEven);

748 return C1;

749 case TargetOpcode::G_FSUB:

750 C1.subtract(C2, APFloat::rmNearestTiesToEven);

751 return C1;

752 case TargetOpcode::G_FMUL:

753 C1.multiply(C2, APFloat::rmNearestTiesToEven);

754 return C1;

755 case TargetOpcode::G_FDIV:

756 C1.divide(C2, APFloat::rmNearestTiesToEven);

757 return C1;

758 case TargetOpcode::G_FREM:

759 C1.mod(C2);

760 return C1;

761 case TargetOpcode::G_FCOPYSIGN:

763 return C1;

764 case TargetOpcode::G_FMINNUM:

765 return minnum(C1, C2);

766 case TargetOpcode::G_FMAXNUM:

767 return maxnum(C1, C2);

768 case TargetOpcode::G_FMINIMUM:

770 case TargetOpcode::G_FMAXIMUM:

772 case TargetOpcode::G_FMINNUM_IEEE:

773 case TargetOpcode::G_FMAXNUM_IEEE:

774

775

776

777

778 break;

779 default:

780 break;

781 }

782

783 return std::nullopt;

784}

785

790 auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);

791 if (!SrcVec2)

793

794 auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);

795 if (!SrcVec1)

797

799 for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {

801 SrcVec2->getSourceReg(Idx), MRI);

802 if (!MaybeCst)

804 FoldedElements.push_back(*MaybeCst);

805 }

806 return FoldedElements;

807}

808

810 bool SNaN) {

813 return false;

814

817 return true;

818

819

821 return !FPVal->getValueAPF().isNaN() ||

822 (SNaN && !FPVal->getValueAPF().isSignaling());

823 }

824

825 if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {

828 return false;

829 return true;

830 }

831

833 default:

834 break;

835 case TargetOpcode::G_FADD:

836 case TargetOpcode::G_FSUB:

837 case TargetOpcode::G_FMUL:

838 case TargetOpcode::G_FDIV:

839 case TargetOpcode::G_FREM:

840 case TargetOpcode::G_FSIN:

841 case TargetOpcode::G_FCOS:

842 case TargetOpcode::G_FTAN:

843 case TargetOpcode::G_FACOS:

844 case TargetOpcode::G_FASIN:

845 case TargetOpcode::G_FATAN:

846 case TargetOpcode::G_FATAN2:

847 case TargetOpcode::G_FCOSH:

848 case TargetOpcode::G_FSINH:

849 case TargetOpcode::G_FTANH:

850 case TargetOpcode::G_FMA:

851 case TargetOpcode::G_FMAD:

852 if (SNaN)

853 return true;

854

855

856 return false;

857 case TargetOpcode::G_FMINNUM_IEEE:

858 case TargetOpcode::G_FMAXNUM_IEEE: {

859 if (SNaN)

860 return true;

861

862

867 }

868 case TargetOpcode::G_FMINNUM:

869 case TargetOpcode::G_FMAXNUM: {

870

871

874 }

875 }

876

877 if (SNaN) {

878

879

881 case TargetOpcode::G_FPEXT:

882 case TargetOpcode::G_FPTRUNC:

883 case TargetOpcode::G_FCANONICALIZE:

884 return true;

885 default:

886 return false;

887 }

888 }

889

890 return false;

891}

892

895 auto PSV = dyn_cast_if_present<const PseudoSourceValue *>(MPO.V);

896 if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {

900 }

901

902 if (const Value *V = dyn_cast_if_present<const Value *>(MPO.V)) {

904 return V->getPointerAlignment(M->getDataLayout());

905 }

906

908}

909

917 Register LiveIn = MRI.getLiveInVirtReg(PhysReg);

918 if (LiveIn) {

920 if (Def) {

921

922 assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");

923 return LiveIn;

924 }

925

926

927

928

929 } else {

930

931 LiveIn = MF.addLiveIn(PhysReg, &RC);

933 MRI.setType(LiveIn, RegTy);

934 }

935

936 BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)

938 if (!EntryMBB.isLiveIn(PhysReg))

940 return LiveIn;

941}

942

947 if (MaybeOp1Cst) {

948 switch (Opcode) {

949 default:

950 break;

951 case TargetOpcode::G_SEXT_INREG: {

952 LLT Ty = MRI.getType(Op1);

954 }

955 }

956 }

957 return std::nullopt;

958}

959

964 if (!Val)

965 return Val;

966

968

969 switch (Opcode) {

970 case TargetOpcode::G_SEXT:

971 return Val->sext(DstSize);

972 case TargetOpcode::G_ZEXT:

973 case TargetOpcode::G_ANYEXT:

974

975 return Val->zext(DstSize);

976 default:

977 break;

978 }

979

981}

982

983std::optional<APFloat>

986 assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);

989 DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,

990 APFloat::rmNearestTiesToEven);

991 return DstVal;

992 }

993 return std::nullopt;

994}

995

996std::optional<SmallVector<unsigned>>

998 std::function<unsigned(APInt)> CB) {

999 LLT Ty = MRI.getType(Src);

1001 auto tryFoldScalar = [&](Register R) -> std::optional {

1003 if (!MaybeCst)

1004 return std::nullopt;

1005 return CB(*MaybeCst);

1006 };

1008

1009 auto *BV = getOpcodeDef<GBuildVector>(Src, MRI);

1010 if (!BV)

1011 return std::nullopt;

1012 for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {

1013 if (auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {

1015 continue;

1016 }

1017 return std::nullopt;

1018 }

1019 return FoldedCTLZs;

1020 }

1021 if (auto MaybeCst = tryFoldScalar(Src)) {

1023 return FoldedCTLZs;

1024 }

1025 return std::nullopt;

1026}
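
The callback is what lets one routine serve both G_CTLZ and G_CTTZ; the caller supplies the per-element count. A sketch for the CTLZ case:

// Per-element fold of G_CTLZ; the helper takes care of splitting a
// G_BUILD_VECTOR source and folding each lane through the callback.
std::optional<SmallVector<unsigned>>
foldCTLZ(Register Src, const MachineRegisterInfo &MRI) {
  return ConstantFoldCountZeros(Src, MRI,
                                [](APInt V) { return V.countl_zero(); });
}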

1027

1028std::optional<SmallVector<APInt>>

1031 LLT Ty = MRI.getType(Op1);

1032 if (Ty != MRI.getType(Op2))

1033 return std::nullopt;

1034

1039 if (!LHSCst || !RHSCst)

1040 return std::nullopt;

1041

1042 switch (Pred) {

1043 case CmpInst::Predicate::ICMP_EQ:

1044 return APInt(1, LHSCst->eq(*RHSCst));

1045 case CmpInst::Predicate::ICMP_NE:

1046 return APInt(1, LHSCst->ne(*RHSCst));

1047 case CmpInst::Predicate::ICMP_UGT:

1048 return APInt(1, LHSCst->ugt(*RHSCst));

1049 case CmpInst::Predicate::ICMP_UGE:

1050 return APInt(1, LHSCst->uge(*RHSCst));

1051 case CmpInst::Predicate::ICMP_ULT:

1052 return APInt(1, LHSCst->ult(*RHSCst));

1053 case CmpInst::Predicate::ICMP_ULE:

1054 return APInt(1, LHSCst->ule(*RHSCst));

1055 case CmpInst::Predicate::ICMP_SGT:

1056 return APInt(1, LHSCst->sgt(*RHSCst));

1057 case CmpInst::Predicate::ICMP_SGE:

1058 return APInt(1, LHSCst->sge(*RHSCst));

1059 case CmpInst::Predicate::ICMP_SLT:

1060 return APInt(1, LHSCst->slt(*RHSCst));

1061 case CmpInst::Predicate::ICMP_SLE:

1062 return APInt(1, LHSCst->sle(*RHSCst));

1063 default:

1064 return std::nullopt;

1065 }

1066 };

1067

1069

1071

1072 auto *BV1 = getOpcodeDef<GBuildVector>(Op1, MRI);

1073 auto *BV2 = getOpcodeDef<GBuildVector>(Op2, MRI);

1074 if (!BV1 || !BV2)

1075 return std::nullopt;

1076 assert(BV1->getNumSources() == BV2->getNumSources() && "Invalid vectors");

1077 for (unsigned I = 0; I < BV1->getNumSources(); ++I) {

1078 if (auto MaybeFold =

1079 TryFoldScalar(BV1->getSourceReg(I), BV2->getSourceReg(I))) {

1081 continue;

1082 }

1083 return std::nullopt;

1084 }

1085 return FoldedICmps;

1086 }

1087

1088 if (auto MaybeCst = TryFoldScalar(Op1, Op2)) {

1090 return FoldedICmps;

1091 }

1092

1093 return std::nullopt;

1094}
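
ConstantFoldICmp returns one 1-bit APInt per lane. A sketch of the scalar case; the wrapper name is illustrative:

// Fold a scalar G_ICMP whose operands are constant. Vector compares come
// back as one APInt(1, ...) per element instead.
std::optional<bool> foldScalarICmp(CmpInst::Predicate Pred, Register LHS,
                                   Register RHS,
                                   const MachineRegisterInfo &MRI) {
  auto Res = ConstantFoldICmp(Pred, LHS, RHS, MRI);
  if (!Res || Res->size() != 1)
    return std::nullopt;
  return (*Res)[0].getBoolValue();
}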

1095

1098 std::optional<DefinitionAndSourceRegister> DefSrcReg =

1100 if (!DefSrcReg)

1101 return false;

1102

1104 const LLT Ty = MRI.getType(Reg);

1105

1106 switch (MI.getOpcode()) {

1107 case TargetOpcode::G_CONSTANT: {

1109 const ConstantInt *CI = MI.getOperand(1).getCImm();

1111 }

1112 case TargetOpcode::G_SHL: {

1113

1114

1115

1116

1118 if (*ConstLHS == 1)

1119 return true;

1120 }

1121

1122 break;

1123 }

1124 case TargetOpcode::G_LSHR: {

1126 if (ConstLHS->isSignMask())

1127 return true;

1128 }

1129

1130 break;

1131 }

1132 case TargetOpcode::G_BUILD_VECTOR: {

1133

1134

1137 return false;

1138

1139 return true;

1140 }

1141 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {

1142

1143

1147 if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())

1148 return false;

1149 }

1150

1151 return true;

1152 }

1153 default:

1154 break;

1155 }

1156

1157 if (!KB)

1158 return false;

1159

1160

1161

1162

1163

1166}

1167

1170}

1171

1174 return OrigTy;

1175

1179

1180

1181

1182

1183

1184

1185

1188 "getLCMType not implemented between fixed and scalable vectors.");

1189

1193

1196 return LLT::vector(Mul.divideCoefficientBy(GCDMinElts),

1198 }

1203 OrigElt);

1204 }

1205

1206

1208 LLT VecTy = OrigTy.isVector() ? OrigTy : TargetTy;

1209 LLT ScalarTy = OrigTy.isVector() ? TargetTy : OrigTy;

1212

1213

1216

1217

1218

1222

1225 OrigEltTy);

1226 }

1227

1228

1231

1233 return OrigTy;

1235 return TargetTy;

1237}

1238

1240

1244 "getCoverTy not implemented between fixed and scalable vectors.");

1245

1246 if (!OrigTy.isVector() || !TargetTy.isVector() || OrigTy == TargetTy ||

1248 return getLCMType(OrigTy, TargetTy);

1249

1252 if (OrigTyNumElts % TargetTyNumElts == 0)

1253 return OrigTy;

1254

1255 unsigned NumElts = alignTo(OrigTyNumElts, TargetTyNumElts);

1258}

1259

1262 return OrigTy;

1263

1266

1267

1268

1269

1270

1271

1272

1275 "getGCDType not implemented between fixed and scalable vectors.");

1276

1281 OrigElt);

1282

1283

1286 GCD);

1287

1291 OrigElt);

1292 }

1293

1294

1295

1301 return OrigTy;

1302

1303

1304

1305

1306

1312}
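
These type helpers drive how legalization splits and widens registers. A small illustration of what they compute for fixed-width vectors, following from the definitions above (the function is only a container for the examples):

void typeHelperExamples() {
  // LCM of the element counts, same element type: <3 x s32>, <2 x s32> -> <6 x s32>.
  LLT LCM = getLCMType(LLT::fixed_vector(3, 32), LLT::fixed_vector(2, 32));
  // GCD of the element counts: <4 x s32>, <2 x s32> -> <2 x s32>.
  LLT GCD = getGCDType(LLT::fixed_vector(4, 32), LLT::fixed_vector(2, 32));
  // Smallest multiple of the target type covering the original:
  // <3 x s32>, <2 x s32> -> <4 x s32>.
  LLT Cover = getCoverTy(LLT::fixed_vector(3, 32), LLT::fixed_vector(2, 32));
  (void)LCM; (void)GCD; (void)Cover;
}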

1313

1315 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&

1316 "Only G_SHUFFLE_VECTOR can have a splat index!");

1318 auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });

1319

1320

1321

1322 if (FirstDefinedIdx == Mask.end())

1323 return 0;

1324

1325

1326

1327 int SplatValue = *FirstDefinedIdx;

1328 if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),

1329 [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))

1330 return std::nullopt;

1331

1332 return SplatValue;

1333}

1334

1336 return Opcode == TargetOpcode::G_BUILD_VECTOR ||

1337 Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;

1338}

1339

1340namespace {

1341

1342std::optional<ValueAndVReg> getAnyConstantSplat(Register VReg,

1344 bool AllowUndef) {

1346 if (!MI)

1347 return std::nullopt;

1348

1349 bool isConcatVectorsOp = MI->getOpcode() == TargetOpcode::G_CONCAT_VECTORS;

1351 return std::nullopt;

1352

1353 std::optional<ValueAndVReg> SplatValAndReg;

1356

1357

1358 auto ElementValAndReg =

1359 isConcatVectorsOp

1360 ? getAnyConstantSplat(Element, MRI, AllowUndef)

1362

1363

1364 if (!ElementValAndReg) {

1365 if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))

1366 continue;

1367 return std::nullopt;

1368 }

1369

1370

1371 if (!SplatValAndReg)

1372 SplatValAndReg = ElementValAndReg;

1373

1374

1375 if (SplatValAndReg->Value != ElementValAndReg->Value)

1376 return std::nullopt;

1377 }

1378

1379 return SplatValAndReg;

1380}

1381

1382}

1383

1386 int64_t SplatValue, bool AllowUndef) {

1387 if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))

1389 return false;

1390}

1391

1394 int64_t SplatValue, bool AllowUndef) {

1396 AllowUndef);

1397}

1398

1399std::optional<APInt>

1401 if (auto SplatValAndReg =

1402 getAnyConstantSplat(Reg, MRI, false)) {

1403 if (std::optional<ValueAndVReg> ValAndVReg =

1405 return ValAndVReg->Value;

1406 }

1407

1408 return std::nullopt;

1409}
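
The splat queries are the vector counterpart of the scalar constant accessors. A sketch that accepts either a scalar constant or a G_BUILD_VECTOR splat (helper name illustrative):

// True when Reg is the scalar constant 1 or a build-vector splat of 1.
bool isOneOrSplatOfOne(Register Reg, const MachineRegisterInfo &MRI) {
  if (std::optional<APInt> Splat = getIConstantSplatVal(Reg, MRI))
    return Splat->isOne();
  if (std::optional<APInt> Cst = getIConstantVRegVal(Reg, MRI))
    return Cst->isOne();
  return false;
}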

1410

1411std::optional<APInt>

1415}

1416

1417std::optional<int64_t>

1420 if (auto SplatValAndReg =

1421 getAnyConstantSplat(Reg, MRI, false))

1423 return std::nullopt;

1424}

1425

1426std::optional<int64_t>

1430}

1431

1432std::optional<FPValueAndVReg>

1434 bool AllowUndef) {

1435 if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))

1437 return std::nullopt;

1438}

1439

1442 bool AllowUndef) {

1444}

1445

1448 bool AllowUndef) {

1450}

1451

1452std::optional<RegOrConstant>

1454 unsigned Opc = MI.getOpcode();

1456 return std::nullopt;

1459 auto Reg = MI.getOperand(1).getReg();

1461 [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))

1462 return std::nullopt;

1464}

1465

1468 bool AllowFP = true,

1469 bool AllowOpaqueConstants = true) {

1470 switch (MI.getOpcode()) {

1471 case TargetOpcode::G_CONSTANT:

1472 case TargetOpcode::G_IMPLICIT_DEF:

1473 return true;

1474 case TargetOpcode::G_FCONSTANT:

1475 return AllowFP;

1476 case TargetOpcode::G_GLOBAL_VALUE:

1477 case TargetOpcode::G_FRAME_INDEX:

1478 case TargetOpcode::G_BLOCK_ADDR:

1479 case TargetOpcode::G_JUMP_TABLE:

1480 return AllowOpaqueConstants;

1481 default:

1482 return false;

1483 }

1484}

1485

1488 Register Def = MI.getOperand(0).getReg();

1490 return true;

1492 if (!BV)

1493 return false;

1494 for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {

1496 getOpcodeDef(BV->getSourceReg(SrcIdx), MRI))

1497 continue;

1498 return false;

1499 }

1500 return true;

1501}

1502

1505 bool AllowFP, bool AllowOpaqueConstants) {

1507 return true;

1508

1510 return false;

1511

1512 const unsigned NumOps = MI.getNumOperands();

1513 for (unsigned I = 1; I != NumOps; ++I) {

1514 const MachineInstr *ElementDef = MRI.getVRegDef(MI.getOperand(I).getReg());

1516 return false;

1517 }

1518

1519 return true;

1520}

1521

1522std::optional<APInt>

1525 Register Def = MI.getOperand(0).getReg();

1527 return C->Value;

1529 if (!MaybeCst)

1530 return std::nullopt;

1531 const unsigned ScalarSize = MRI.getType(Def).getScalarSizeInBits();

1532 return APInt(ScalarSize, *MaybeCst, true);

1533}

1534

1535std::optional<APFloat>

1538 Register Def = MI.getOperand(0).getReg();

1540 return FpConst->Value;

1542 if (!MaybeCstFP)

1543 return std::nullopt;

1544 return MaybeCstFP->Value;

1545}

1546

1549 switch (MI.getOpcode()) {

1550 case TargetOpcode::G_IMPLICIT_DEF:

1551 return AllowUndefs;

1552 case TargetOpcode::G_CONSTANT:

1553 return MI.getOperand(1).getCImm()->isNullValue();

1554 case TargetOpcode::G_FCONSTANT: {

1555 const ConstantFP *FPImm = MI.getOperand(1).getFPImm();

1557 }

1558 default:

1559 if (!AllowUndefs)

1560 return false;

1562 }

1563}

1564

1567 bool AllowUndefs) {

1568 switch (MI.getOpcode()) {

1569 case TargetOpcode::G_IMPLICIT_DEF:

1570 return AllowUndefs;

1571 case TargetOpcode::G_CONSTANT:

1572 return MI.getOperand(1).getCImm()->isAllOnesValue();

1573 default:

1574 if (!AllowUndefs)

1575 return false;

1577 }

1578}

1579

1582 std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {

1583

1585 if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)

1586 return Match(nullptr);

1587

1588

1589 if (Def->getOpcode() == TargetOpcode::G_CONSTANT)

1590 return Match(Def->getOperand(1).getCImm());

1591

1592 if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)

1593 return false;

1594

1595 for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {

1596 Register SrcElt = Def->getOperand(I).getReg();

1598 if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {

1599 if (Match(nullptr))

1600 return false;

1601 continue;

1602 }

1603

1604 if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||

1606 return false;

1607 }

1608

1609 return true;

1610}

1611

1613 bool IsFP) {

1615 case TargetLowering::UndefinedBooleanContent:

1616 return Val & 0x1;

1617 case TargetLowering::ZeroOrOneBooleanContent:

1618 return Val == 1;

1619 case TargetLowering::ZeroOrNegativeOneBooleanContent:

1620 return Val == -1;

1621 }

1623}

1624

1626 bool IsVector, bool IsFP) {

1628 case TargetLowering::UndefinedBooleanContent:

1629 return ~Val & 0x1;

1630 case TargetLowering::ZeroOrOneBooleanContent:

1631 case TargetLowering::ZeroOrNegativeOneBooleanContent:

1632 return Val == 0;

1633 }

1635}

1636

1638 bool IsFP) {

1640 case TargetLowering::UndefinedBooleanContent:

1641 case TargetLowering::ZeroOrOneBooleanContent:

1642 return 1;

1643 case TargetLowering::ZeroOrNegativeOneBooleanContent:

1644 return -1;

1645 }

1647}
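
These predicates interpret booleans according to the target's BooleanContent. A sketch that materializes the target's canonical "true" value for a compare result; TLI is assumed to come from the subtarget:

// Build the canonical true value (1 or -1 depending on BooleanContent) for a
// non-floating-point comparison result of type BoolTy.
MachineInstrBuilder buildICmpTrueVal(MachineIRBuilder &MIB, LLT BoolTy,
                                     const TargetLowering &TLI) {
  int64_t TrueVal =
      getICmpTrueVal(TLI, /*IsVector=*/BoolTy.isVector(), /*IsFP=*/false);
  return MIB.buildConstant(BoolTy, TrueVal);
}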

1648

1653 if (Op.isReg() && Op.getReg().isVirtual())

1654 DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));

1655 }

1658 MI.eraseFromParent();

1659 if (LocObserver)

1661}

1662

1669

1670 while (!DeadInstChain.empty()) {

1673 continue;

1675 }

1676}

1677

1681}

1682

1684 for (auto &Def : MI.defs()) {

1685 assert(Def.isReg() && "Must be a reg");

1686

1688 for (auto &MOUse : MRI.use_operands(Def.getReg())) {

1690

1691 if (DbgValue->isNonListDebugValue() && DbgValue->getNumOperands() == 4) {

1693 }

1694 }

1695

1696 if (!DbgUsers.empty()) {

1698 }

1699 }

1700}

1701

1703 switch (Opc) {

1704 case TargetOpcode::G_FABS:

1705 case TargetOpcode::G_FADD:

1706 case TargetOpcode::G_FCANONICALIZE:

1707 case TargetOpcode::G_FCEIL:

1708 case TargetOpcode::G_FCONSTANT:

1709 case TargetOpcode::G_FCOPYSIGN:

1710 case TargetOpcode::G_FCOS:

1711 case TargetOpcode::G_FDIV:

1712 case TargetOpcode::G_FEXP2:

1713 case TargetOpcode::G_FEXP:

1714 case TargetOpcode::G_FFLOOR:

1715 case TargetOpcode::G_FLOG10:

1716 case TargetOpcode::G_FLOG2:

1717 case TargetOpcode::G_FLOG:

1718 case TargetOpcode::G_FMA:

1719 case TargetOpcode::G_FMAD:

1720 case TargetOpcode::G_FMAXIMUM:

1721 case TargetOpcode::G_FMAXNUM:

1722 case TargetOpcode::G_FMAXNUM_IEEE:

1723 case TargetOpcode::G_FMINIMUM:

1724 case TargetOpcode::G_FMINNUM:

1725 case TargetOpcode::G_FMINNUM_IEEE:

1726 case TargetOpcode::G_FMUL:

1727 case TargetOpcode::G_FNEARBYINT:

1728 case TargetOpcode::G_FNEG:

1729 case TargetOpcode::G_FPEXT:

1730 case TargetOpcode::G_FPOW:

1731 case TargetOpcode::G_FPTRUNC:

1732 case TargetOpcode::G_FREM:

1733 case TargetOpcode::G_FRINT:

1734 case TargetOpcode::G_FSIN:

1735 case TargetOpcode::G_FTAN:

1736 case TargetOpcode::G_FACOS:

1737 case TargetOpcode::G_FASIN:

1738 case TargetOpcode::G_FATAN:

1739 case TargetOpcode::G_FATAN2:

1740 case TargetOpcode::G_FCOSH:

1741 case TargetOpcode::G_FSINH:

1742 case TargetOpcode::G_FTANH:

1743 case TargetOpcode::G_FSQRT:

1744 case TargetOpcode::G_FSUB:

1745 case TargetOpcode::G_INTRINSIC_ROUND:

1746 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:

1747 case TargetOpcode::G_INTRINSIC_TRUNC:

1748 return true;

1749 default:

1750 return false;

1751 }

1752}

1753

1754

1757 LLT Ty = MRI.getType(ShiftAmount);

1758

1760 return false;

1761

1763 std::optional<ValueAndVReg> Val =

1765 if (!Val)

1766 return false;

1768 }

1769

1770 GBuildVector *BV = getOpcodeDef<GBuildVector>(ShiftAmount, MRI);

1771 if (!BV)

1772 return false;

1773

1775 for (unsigned I = 0; I < Sources; ++I) {

1776 std::optional<ValueAndVReg> Val =

1778 if (!Val)

1779 return false;

1781 return false;

1782 }

1783

1784 return true;

1785}

1786

1787namespace {

1792};

1793}

1794

1797}

1798

1801}

1802

1804 bool ConsiderFlagsAndMetadata,

1807

1808 if (ConsiderFlagsAndMetadata && includesPoison(Kind))

1809 if (auto *GMI = dyn_cast<GenericMachineInstr>(RegDef))

1810 if (GMI->hasPoisonGeneratingFlags())

1811 return true;

1812

1813

1815 case TargetOpcode::G_BUILD_VECTOR:

1816 case TargetOpcode::G_CONSTANT_FOLD_BARRIER:

1817 return false;

1818 case TargetOpcode::G_SHL:

1819 case TargetOpcode::G_ASHR:

1820 case TargetOpcode::G_LSHR:

1823 case TargetOpcode::G_FPTOSI:

1824 case TargetOpcode::G_FPTOUI:

1825

1826

1827 return true;

1828 case TargetOpcode::G_CTLZ:

1829 case TargetOpcode::G_CTTZ:

1830 case TargetOpcode::G_ABS:

1831 case TargetOpcode::G_CTPOP:

1832 case TargetOpcode::G_BSWAP:

1833 case TargetOpcode::G_BITREVERSE:

1834 case TargetOpcode::G_FSHL:

1835 case TargetOpcode::G_FSHR:

1836 case TargetOpcode::G_SMAX:

1837 case TargetOpcode::G_SMIN:

1838 case TargetOpcode::G_UMAX:

1839 case TargetOpcode::G_UMIN:

1840 case TargetOpcode::G_PTRMASK:

1841 case TargetOpcode::G_SADDO:

1842 case TargetOpcode::G_SSUBO:

1843 case TargetOpcode::G_UADDO:

1844 case TargetOpcode::G_USUBO:

1845 case TargetOpcode::G_SMULO:

1846 case TargetOpcode::G_UMULO:

1847 case TargetOpcode::G_SADDSAT:

1848 case TargetOpcode::G_UADDSAT:

1849 case TargetOpcode::G_SSUBSAT:

1850 case TargetOpcode::G_USUBSAT:

1851 return false;

1852 case TargetOpcode::G_SSHLSAT:

1853 case TargetOpcode::G_USHLSAT:

1856 case TargetOpcode::G_INSERT_VECTOR_ELT: {

1859 std::optional<ValueAndVReg> Index =

1861 if (!Index)

1862 return true;

1863 LLT VecTy = MRI.getType(Insert->getVectorReg());

1865 }

1866 return false;

1867 }

1868 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {

1871 std::optional<ValueAndVReg> Index =

1873 if (!Index)

1874 return true;

1877 }

1878 return false;

1879 }

1880 case TargetOpcode::G_SHUFFLE_VECTOR: {

1881 GShuffleVector *Shuffle = cast<GShuffleVector>(RegDef);

1884 }

1885 case TargetOpcode::G_FNEG:

1886 case TargetOpcode::G_PHI:

1887 case TargetOpcode::G_SELECT:

1888 case TargetOpcode::G_UREM:

1889 case TargetOpcode::G_SREM:

1890 case TargetOpcode::G_FREEZE:

1891 case TargetOpcode::G_ICMP:

1892 case TargetOpcode::G_FCMP:

1893 case TargetOpcode::G_FADD:

1894 case TargetOpcode::G_FSUB:

1895 case TargetOpcode::G_FMUL:

1896 case TargetOpcode::G_FDIV:

1897 case TargetOpcode::G_FREM:

1898 case TargetOpcode::G_PTR_ADD:

1899 return false;

1900 default:

1901 return !isa<GCastOp>(RegDef) && !isa<GBinOp>(RegDef);

1902 }

1903}

1904

1910 return false;

1911

1913

1915 case TargetOpcode::G_FREEZE:

1916 return true;

1917 case TargetOpcode::G_IMPLICIT_DEF:

1919 case TargetOpcode::G_CONSTANT:

1920 case TargetOpcode::G_FCONSTANT:

1921 return true;

1922 case TargetOpcode::G_BUILD_VECTOR: {

1923 GBuildVector *BV = cast<GBuildVector>(RegDef);

1925 for (unsigned I = 0; I < NumSources; ++I)

1927 Depth + 1, Kind))

1928 return false;

1929 return true;

1930 }

1931 case TargetOpcode::G_PHI: {

1932 GPhi *Phi = cast<GPhi>(RegDef);

1933 unsigned NumIncoming = Phi->getNumIncomingValues();

1934 for (unsigned I = 0; I < NumIncoming; ++I)

1936 Depth + 1, Kind))

1937 return false;

1938 return true;

1939 }

1940 default: {

1942 if (!MO.isReg())

1943 return true;

1944 return ::isGuaranteedNotToBeUndefOrPoison(MO.getReg(), MRI, Depth + 1,

1945 Kind);

1946 };

1948 true, Kind) &&

1950 }

1951 }

1952}

1953

1955 bool ConsiderFlagsAndMetadata) {

1956 return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,

1958}

1959

1961 bool ConsiderFlagsAndMetadata = true) {

1962 return ::canCreateUndefOrPoison(Reg, MRI, ConsiderFlagsAndMetadata,

1964}

1965

1968 unsigned Depth) {

1969 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,

1971}

1972

1975 unsigned Depth) {

1976 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,

1978}

1979

1982 unsigned Depth) {

1983 return ::isGuaranteedNotToBeUndefOrPoison(Reg, MRI, Depth,

1985}

1986

1992}

1993

1996

1998}

1999

2000std::optional<GIConstant>

2003

2005 std::optional<ValueAndVReg> MayBeConstant =

2007 if (!MayBeConstant)

2008 return std::nullopt;

2010 }

2011

2014 unsigned NumSources = Build->getNumSources();

2015 for (unsigned I = 0; I < NumSources; ++I) {

2016 Register SrcReg = Build->getSourceReg(I);

2017 std::optional<ValueAndVReg> MayBeConstant =

2019 if (!MayBeConstant)

2020 return std::nullopt;

2021 Values.push_back(MayBeConstant->Value);

2022 }

2024 }

2025

2026 std::optional<ValueAndVReg> MayBeConstant =

2028 if (!MayBeConstant)

2029 return std::nullopt;

2030

2032}

2033

2036

2037 return Values[0];

2038}

2039

2040std::optional<GFConstant>

2043

2045 std::optional<FPValueAndVReg> MayBeConstant =

2047 if (!MayBeConstant)

2048 return std::nullopt;

2050 }

2051

2054 unsigned NumSources = Build->getNumSources();

2055 for (unsigned I = 0; I < NumSources; ++I) {

2056 Register SrcReg = Build->getSourceReg(I);

2057 std::optional<FPValueAndVReg> MayBeConstant =

2059 if (!MayBeConstant)

2060 return std::nullopt;

2061 Values.push_back(MayBeConstant->Value);

2062 }

2064 }

2065

2066 std::optional<FPValueAndVReg> MayBeConstant =

2068 if (!MayBeConstant)

2069 return std::nullopt;

2070

2072}
