LLVM: lib/CodeGen/GlobalISel/GISelValueTracking.cpp Source File

1//===- lib/CodeGen/GlobalISel/GISelValueTracking.cpp ------------*- C++ -*-===//

2//

3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.

4// See https://llvm.org/LICENSE.txt for license information.

5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

6//

7//===----------------------------------------------------------------------===//

8//

9/// Provides analysis for querying information about KnownBits during GISel

10/// passes.

11//

12//===----------------------------------------------------------------------===//
40

41#define DEBUG_TYPE "gisel-known-bits"

42

43using namespace llvm;

45

47

49 "Analysis for ComputingKnownBits", false, true)

50

52 : MF(MF), MRI(MF.getRegInfo()), TL(*MF.getSubtarget().getTargetLowering()),

53 DL(MF.getFunction().getDataLayout()), MaxDepth(MaxDepth) {}

54
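// Public entry points of the analysis: computeKnownAlignment answers
// alignment queries for a register, getKnownBits / computeKnownBitsImpl
// answer known-zero/known-one bit queries, and computeNumSignBits (further
// below) reports how many high bits are copies of the sign bit. Each walks
// the defining instructions of virtual registers, bounded by MaxDepth.
//
// Illustrative sketch of a query from a GlobalISel pass, assuming an
// analysis instance VT and a virtual register Reg (not part of this file):
//
//   KnownBits Known = VT.getKnownBits(Reg);         // known zero/one bits
//   unsigned SignBits = VT.computeNumSignBits(Reg); // copies of the sign bit
//   Align KnownAlign = VT.computeKnownAlignment(Reg);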

57 switch (MI->getOpcode()) {

58 case TargetOpcode::COPY:

60 case TargetOpcode::G_ASSERT_ALIGN: {

61

62 return Align(MI->getOperand(2).getImm());

63 }

64 case TargetOpcode::G_FRAME_INDEX: {

65 int FrameIdx = MI->getOperand(1).getIndex();

66 return MF.getFrameInfo().getObjectAlign(FrameIdx);

67 }

68 case TargetOpcode::G_INTRINSIC:

69 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:

70 case TargetOpcode::G_INTRINSIC_CONVERGENT:

71 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:

72 default:

73 return TL.computeKnownAlignForTargetInstr(*this, R, MRI, Depth + 1);

74 }

75}

76

78 assert(MI.getNumExplicitDefs() == 1 &&

79 "expected single return generic instruction");

81}

82

84 const LLT Ty = MRI.getType(R);

85

86

87

88 APInt DemandedElts =

91}

92

94 const APInt &DemandedElts,

98 return Known;

99}

100

102 LLT Ty = MRI.getType(R);

103 unsigned BitWidth = Ty.getScalarSizeInBits();

105}

106

110

114
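// Debug helper: prints the instruction being analyzed, the recursion depth,
// and the resulting known-zero / known-one masks (in hex) to the debug
// output stream.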

115[[maybe_unused]] static void

117 dbgs() << "[" << Depth << "] Compute known bits: " << MI << "[" << Depth

118 << "] Computed for: " << MI << "[" << Depth << "] Known: 0x"

120 << "[" << Depth << "] Zero: 0x" << toString(Known.Zero, 16, false)

121 << "\n"

122 << "[" << Depth << "] One: 0x" << toString(Known.One, 16, false)

123 << "\n";

124}

125

126
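// Merge the known bits of two possible sources (e.g. the two arms of a
// G_SELECT) by intersecting them: only bits known in both registers remain
// known in the result.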

127void GISelValueTracking::computeKnownBitsMin(Register Src0, Register Src1,

129 const APInt &DemandedElts,

130 unsigned Depth) {

131

133

134

136 return;

137

138 KnownBits Known2;

140

141

143}

144

145

146

147

158

160 const APInt &DemandedElts,

161 unsigned Depth) {

163 unsigned Opcode = MI.getOpcode();

164 LLT DstTy = MRI.getType(R);

165

166

167

168

171 return;

172 }

173

174#ifndef NDEBUG

178 "DemandedElt width should equal the fixed vector number of elements");

179 } else {

181 "DemandedElt width should be 1 for scalars or scalable vectors");

182 }

183#endif

184

187

188

189

190

191

192

193

194

195

197 return;

198

199 if (!DemandedElts)

200 return;

201

203

204 switch (Opcode) {

205 default:

206 TL.computeKnownBitsForTargetInstr(*this, R, Known, DemandedElts, MRI,

208 break;

209 case TargetOpcode::G_BUILD_VECTOR: {

210

214 if (!DemandedElts[I])

215 continue;

216

218

219

221

222

224 break;

225 }

226 break;

227 }

228 case TargetOpcode::G_SPLAT_VECTOR: {

231

232

234 break;

235 }

236 case TargetOpcode::COPY:

237 case TargetOpcode::G_PHI:

238 case TargetOpcode::PHI: {

241

242

243

244 assert(MI.getOperand(0).getSubReg() == 0 && "Is this code in SSA?");

245

246

247 for (unsigned Idx = 1; Idx < MI.getNumOperands(); Idx += 2) {

249 Register SrcReg = Src.getReg();

250 LLT SrcTy = MRI.getType(SrcReg);

251

252

253

254

255

256

257

258 if (SrcReg.isVirtual() && Src.getSubReg() == 0 &&

259 SrcTy.isValid()) {

260

261

262

263 APInt NowDemandedElts = SrcTy.isFixedVector() && !DstTy.isFixedVector()

265 : DemandedElts;

266

268 Depth + (Opcode != TargetOpcode::COPY));

271

272

274 break;

275 } else {

276

278 break;

279 }

280 }

281 break;

282 }

283 case TargetOpcode::G_CONSTANT: {

285 break;

286 }

287 case TargetOpcode::G_FRAME_INDEX: {

288 int FrameIdx = MI.getOperand(1).getIndex();

289 TL.computeKnownBitsForFrameIndex(FrameIdx, Known, MF);

290 break;

291 }

292 case TargetOpcode::G_SUB: {

298 break;

299 }

300 case TargetOpcode::G_XOR: {

305

306 Known ^= Known2;

307 break;

308 }

309 case TargetOpcode::G_PTR_ADD: {

311 break;

312

313 LLT Ty = MRI.getType(MI.getOperand(1).getReg());

314 if (DL.isNonIntegralAddressSpace(Ty.getAddressSpace()))

315 break;

316 [[fallthrough]];

317 }

318 case TargetOpcode::G_ADD: {

324 break;

325 }

326 case TargetOpcode::G_AND: {

327

332

333 Known &= Known2;

334 break;

335 }

336 case TargetOpcode::G_OR: {

337

342

343 Known |= Known2;

344 break;

345 }

346 case TargetOpcode::G_MUL: {

352 break;

353 }

354 case TargetOpcode::G_UMULH: {

360 break;

361 }

362 case TargetOpcode::G_SMULH: {

368 break;

369 }

370 case TargetOpcode::G_SELECT: {

371 computeKnownBitsMin(MI.getOperand(2).getReg(), MI.getOperand(3).getReg(),

372 Known, DemandedElts, Depth + 1);

373 break;

374 }

375 case TargetOpcode::G_SMIN: {

376

383 break;

384 }

385 case TargetOpcode::G_SMAX: {

386

393 break;

394 }

395 case TargetOpcode::G_UMIN: {

402 break;

403 }

404 case TargetOpcode::G_UMAX: {

411 break;

412 }

413 case TargetOpcode::G_FCMP:

414 case TargetOpcode::G_ICMP: {

416 break;

417 if (TL.getBooleanContents(DstTy.isVector(),

418 Opcode == TargetOpcode::G_FCMP) ==

422 break;

423 }

424 case TargetOpcode::G_SEXT: {

427

428

430 break;

431 }

432 case TargetOpcode::G_ASSERT_SEXT:

433 case TargetOpcode::G_SEXT_INREG: {

436 Known = Known.sextInReg(MI.getOperand(2).getImm());

437 break;

438 }

439 case TargetOpcode::G_ANYEXT: {

443 break;

444 }

445 case TargetOpcode::G_LOAD: {

451 break;

452 }

453 case TargetOpcode::G_SEXTLOAD:

454 case TargetOpcode::G_ZEXTLOAD: {

456 break;

461 Known = Opcode == TargetOpcode::G_SEXTLOAD

464 break;

465 }

466 case TargetOpcode::G_ASHR: {

473 break;

474 }

475 case TargetOpcode::G_LSHR: {

482 break;

483 }

484 case TargetOpcode::G_SHL: {

491 break;

492 }

493 case TargetOpcode::G_INTTOPTR:

494 case TargetOpcode::G_PTRTOINT:

496 break;

497

498 [[fallthrough]];

499 case TargetOpcode::G_ZEXT:

500 case TargetOpcode::G_TRUNC: {

501 Register SrcReg = MI.getOperand(1).getReg();

504 break;

505 }

506 case TargetOpcode::G_ASSERT_ZEXT: {

507 Register SrcReg = MI.getOperand(1).getReg();

509

510 unsigned SrcBitWidth = MI.getOperand(2).getImm();

511 assert(SrcBitWidth && "SrcBitWidth can't be zero");

513 Known.Zero |= (~InMask);

514 Known.One &= (~Known.Zero);

515 break;

516 }

517 case TargetOpcode::G_ASSERT_ALIGN: {

518 int64_t LogOfAlign = Log2_64(MI.getOperand(2).getImm());

519

520

521

522

525 break;

526 }

527 case TargetOpcode::G_MERGE_VALUES: {

528 unsigned NumOps = MI.getNumOperands();

529 unsigned OpSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();

530

531 for (unsigned I = 0; I != NumOps - 1; ++I) {

534 DemandedElts, Depth + 1);

535 Known.insertBits(SrcOpKnown, I * OpSize);

536 }

537 break;

538 }

539 case TargetOpcode::G_UNMERGE_VALUES: {

540 unsigned NumOps = MI.getNumOperands();

542 LLT SrcTy = MRI.getType(SrcReg);

543

544 if (SrcTy.isVector() && SrcTy.getScalarType() != DstTy.getScalarType())

545 return;

546

547

548 unsigned DstIdx = 0;

549 for (; DstIdx != NumOps - 1 && MI.getOperand(DstIdx).getReg() != R;

550 ++DstIdx)

551 ;

552

553 APInt SubDemandedElts = DemandedElts;

554 if (SrcTy.isVector()) {

556 SubDemandedElts =

557 DemandedElts.zext(SrcTy.getNumElements()).shl(DstIdx * DstLanes);

558 }

559

562

563 if (SrcTy.isVector())

564 Known = std::move(SrcOpKnown);

565 else

567 break;

568 }

569 case TargetOpcode::G_BSWAP: {

570 Register SrcReg = MI.getOperand(1).getReg();

573 break;

574 }

575 case TargetOpcode::G_BITREVERSE: {

576 Register SrcReg = MI.getOperand(1).getReg();

579 break;

580 }

581 case TargetOpcode::G_CTPOP: {

584

585

589

590

591 break;

592 }

593 case TargetOpcode::G_UBFX: {

594 KnownBits SrcOpKnown, OffsetKnown, WidthKnown;

602 break;

603 }

604 case TargetOpcode::G_SBFX: {

605 KnownBits SrcOpKnown, OffsetKnown, WidthKnown;

615

616

620 break;

621 }

622 case TargetOpcode::G_UADDO:

623 case TargetOpcode::G_UADDE:

624 case TargetOpcode::G_SADDO:

625 case TargetOpcode::G_SADDE:

626 case TargetOpcode::G_USUBO:

627 case TargetOpcode::G_USUBE:

628 case TargetOpcode::G_SSUBO:

629 case TargetOpcode::G_SSUBE:

630 case TargetOpcode::G_UMULO:

631 case TargetOpcode::G_SMULO: {

632 if (MI.getOperand(1).getReg() == R) {

633

634

635 if (TL.getBooleanContents(DstTy.isVector(), false) ==

639 }

640 break;

641 }

642 case TargetOpcode::G_CTLZ:

643 case TargetOpcode::G_CTLZ_ZERO_UNDEF: {

647

651 break;

652 }

653 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {

657

659

660 LLT VecVT = MRI.getType(InVec);

661

663 break;

664

667

668

670 break;

671

674

675

676

678 if (ConstEltNo && ConstEltNo->ult(NumSrcElts))

679 DemandedSrcElts =

681

683 break;

684 }

685 case TargetOpcode::G_SHUFFLE_VECTOR: {

686 APInt DemandedLHS, DemandedRHS;

687

688

689 unsigned NumElts = MRI.getType(MI.getOperand(1).getReg()).getNumElements();

691 DemandedElts, DemandedLHS, DemandedRHS))

692 break;

693

694

697 if (!!DemandedLHS) {

701 }

702

704 break;

705 if (!!DemandedRHS) {

709 }

710 break;

711 }

712 case TargetOpcode::G_CONCAT_VECTORS: {

713 if (MRI.getType(MI.getOperand(0).getReg()).isScalableVector())

714 break;

715

718 unsigned NumSubVectorElts =

719 MRI.getType(MI.getOperand(1).getReg()).getNumElements();

720

722 APInt DemandedSub =

723 DemandedElts.extractBits(NumSubVectorElts, I * NumSubVectorElts);

724 if (!!DemandedSub) {

726

728 }

729

731 break;

732 }

733 break;

734 }

735 case TargetOpcode::G_ABS: {

736 Register SrcReg = MI.getOperand(1).getReg();

738 Known = Known.abs();

740 1);

741 break;

742 }

743 }

744

746}

747

749 Ty = Ty.getScalarType();

753}

754

757 unsigned Depth) {

758 LLT Ty = MRI.getType(R);

759 APInt DemandedElts =

761 computeKnownFPClass(R, DemandedElts, InterestedClasses, Known, Depth);

762}

763

764void GISelValueTracking::computeKnownFPClassForFPTrunc(

769 return;

770

771 Register Val = MI.getOperand(1).getReg();

772 KnownFPClass KnownSrc;

773 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,

775

776

777

780

782

783

784}

785
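// Driver for floating-point class tracking: classes already ruled out by the
// instruction's no-NaN / no-infinity flags are accumulated in
// KnownNotFromFlags and removed from InterestedClasses before dispatching on
// the opcode; the default case defers to the target via
// computeKnownFPClassForTargetInstr.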

786void GISelValueTracking::computeKnownFPClass(Register R,

787 const APInt &DemandedElts,

790 unsigned Depth) {

791 assert(Known.isUnknown() && "should not be called with known information");

792

793 if (!DemandedElts) {

794

796 return;

797 }

798

800

801 MachineInstr &MI = *MRI.getVRegDef(R);

802 unsigned Opcode = MI.getOpcode();

803 LLT DstTy = MRI.getType(R);

804

807 return;

808 }

809

811 switch (Cst->getKind()) {

813 auto APF = Cst->getScalarValue();

815 Known.SignBit = APF.isNegative();

816 break;

817 }

820 bool SignBitAllZero = true;

821 bool SignBitAllOne = true;

822

823 for (auto C : *Cst) {

825 if (C.isNegative())

826 SignBitAllZero = false;

827 else

828 SignBitAllOne = false;

829 }

830

831 if (SignBitAllOne != SignBitAllZero)

832 Known.SignBit = SignBitAllOne;

833

834 break;

835 }

838 break;

839 }

840 }

841

842 return;

843 }

844

847 KnownNotFromFlags |= fcNan;

849 KnownNotFromFlags |= fcInf;

850

851

852

853 InterestedClasses &= ~KnownNotFromFlags;

854

855 auto ClearClassesFromFlags =

857

858

860 return;

861

862 const MachineFunction *MF = MI.getMF();

863

864 switch (Opcode) {

865 default:

866 TL.computeKnownFPClassForTargetInstr(*this, R, Known, DemandedElts, MRI,

868 break;

869 case TargetOpcode::G_FNEG: {

870 Register Val = MI.getOperand(1).getReg();

871 computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known, Depth + 1);

872 Known.fneg();

873 break;

874 }

875 case TargetOpcode::G_SELECT: {

880

883

888

893

894

895

896

897

898 bool LookThroughFAbsFNeg = CmpLHS != LHS && CmpLHS != RHS;

899 std::tie(TestedValue, MaskIfTrue, MaskIfFalse) =

900 fcmpImpliesClass(Pred, *MF, CmpLHS, CmpRHS, LookThroughFAbsFNeg);

905 MaskIfTrue = TestedMask;

906 MaskIfFalse = ~TestedMask;

907 }

908

909 if (TestedValue == LHS) {

910

911 FilterLHS = MaskIfTrue;

912 } else if (TestedValue == RHS) {

913

914 FilterRHS = MaskIfFalse;

915 }

916

917 KnownFPClass Known2;

918 computeKnownFPClass(LHS, DemandedElts, InterestedClasses & FilterLHS, Known,

921

922 computeKnownFPClass(RHS, DemandedElts, InterestedClasses & FilterRHS,

923 Known2, Depth + 1);

925

926 Known |= Known2;

927 break;

928 }

929 case TargetOpcode::G_FCOPYSIGN: {

930 Register Magnitude = MI.getOperand(1).getReg();

931 Register Sign = MI.getOperand(2).getReg();

932

933 KnownFPClass KnownSign;

934

935 computeKnownFPClass(Magnitude, DemandedElts, InterestedClasses, Known,

937 computeKnownFPClass(Sign, DemandedElts, InterestedClasses, KnownSign,

940 break;

941 }

942 case TargetOpcode::G_FMA:

943 case TargetOpcode::G_STRICT_FMA:

944 case TargetOpcode::G_FMAD: {

946 break;

947

951

952 if (A != B)

953 break;

954

955

957

958

959 KnownFPClass KnownAddend;

960 computeKnownFPClass(C, DemandedElts, InterestedClasses, KnownAddend,

962

965 break;

966 }

967 case TargetOpcode::G_FSQRT:

968 case TargetOpcode::G_STRICT_FSQRT: {

969 KnownFPClass KnownSrc;

970 FPClassTest InterestedSrcs = InterestedClasses;

971 if (InterestedClasses & fcNan)

973

974 Register Val = MI.getOperand(1).getReg();

975

976 computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc, Depth + 1);

977

982

983

986

987

989 break;

990 }

991 case TargetOpcode::G_FABS: {

993 Register Val = MI.getOperand(1).getReg();

994

995

996 computeKnownFPClass(Val, DemandedElts, InterestedClasses, Known,

998 }

999 Known.fabs();

1000 break;

1001 }

1002 case TargetOpcode::G_FSIN:

1003 case TargetOpcode::G_FCOS:

1004 case TargetOpcode::G_FSINCOS: {

1005

1006 Register Val = MI.getOperand(1).getReg();

1007 KnownFPClass KnownSrc;

1008

1009 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,

1012

1015 break;

1016 }

1017 case TargetOpcode::G_FMAXNUM:

1018 case TargetOpcode::G_FMINNUM:

1019 case TargetOpcode::G_FMINNUM_IEEE:

1020 case TargetOpcode::G_FMAXIMUM:

1021 case TargetOpcode::G_FMINIMUM:

1022 case TargetOpcode::G_FMAXNUM_IEEE:

1023 case TargetOpcode::G_FMAXIMUMNUM:

1024 case TargetOpcode::G_FMINIMUMNUM: {

1027 KnownFPClass KnownLHS, KnownRHS;

1028

1029 computeKnownFPClass(LHS, DemandedElts, InterestedClasses, KnownLHS,

1031 computeKnownFPClass(RHS, DemandedElts, InterestedClasses, KnownRHS,

1033

1035 Known = KnownLHS | KnownRHS;

1036

1037

1038 if (NeverNaN && (Opcode == TargetOpcode::G_FMINNUM ||

1039 Opcode == TargetOpcode::G_FMAXNUM ||

1040 Opcode == TargetOpcode::G_FMINIMUMNUM ||

1041 Opcode == TargetOpcode::G_FMAXIMUMNUM))

1043

1044 if (Opcode == TargetOpcode::G_FMAXNUM ||

1045 Opcode == TargetOpcode::G_FMAXIMUMNUM ||

1046 Opcode == TargetOpcode::G_FMAXNUM_IEEE) {

1047

1048

1054 } else if (Opcode == TargetOpcode::G_FMAXIMUM) {

1055

1056

1060 } else if (Opcode == TargetOpcode::G_FMINNUM ||

1061 Opcode == TargetOpcode::G_FMINIMUMNUM ||

1062 Opcode == TargetOpcode::G_FMINNUM_IEEE) {

1063

1064

1070 } else if (Opcode == TargetOpcode::G_FMINIMUM) {

1071

1072

1076 } else {

1078 }

1079

1080

1081

1082

1083

1084

1085

1086

1089 DenormalMode Mode =

1093 }

1094

1100 else

1102 } else if ((Opcode == TargetOpcode::G_FMAXIMUM ||

1103 Opcode == TargetOpcode::G_FMINIMUM) ||

1104 Opcode == TargetOpcode::G_FMAXIMUMNUM ||

1105 Opcode == TargetOpcode::G_FMINIMUMNUM ||

1106 Opcode == TargetOpcode::G_FMAXNUM_IEEE ||

1107 Opcode == TargetOpcode::G_FMINNUM_IEEE ||

1108

1113 if ((Opcode == TargetOpcode::G_FMAXIMUM ||

1114 Opcode == TargetOpcode::G_FMAXNUM ||

1115 Opcode == TargetOpcode::G_FMAXIMUMNUM ||

1116 Opcode == TargetOpcode::G_FMAXNUM_IEEE) &&

1117 (KnownLHS.SignBit == false || KnownRHS.SignBit == false))

1119 else if ((Opcode == TargetOpcode::G_FMINIMUM ||

1120 Opcode == TargetOpcode::G_FMINNUM ||

1121 Opcode == TargetOpcode::G_FMINIMUMNUM ||

1122 Opcode == TargetOpcode::G_FMINNUM_IEEE) &&

1123 (KnownLHS.SignBit == true || KnownRHS.SignBit == true))

1125 }

1126 }

1127 break;

1128 }

1129 case TargetOpcode::G_FCANONICALIZE: {

1130 Register Val = MI.getOperand(1).getReg();

1131 KnownFPClass KnownSrc;

1132 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,

1134

1135

1136

1137

1138

1139

1140

1142

1143

1144

1147 else

1149

1150

1151

1154 DenormalMode DenormMode = MF->getDenormalMode(FPType);

1160 break;

1161 }

1162

1165

1170

1171 break;

1172 }

1173 case TargetOpcode::G_VECREDUCE_FMAX:

1174 case TargetOpcode::G_VECREDUCE_FMIN:

1175 case TargetOpcode::G_VECREDUCE_FMAXIMUM:

1176 case TargetOpcode::G_VECREDUCE_FMINIMUM: {

1177 Register Val = MI.getOperand(1).getReg();

1178

1179

1180

1181 Known =

1182 computeKnownFPClass(Val, MI.getFlags(), InterestedClasses, Depth + 1);

1183

1186 break;

1187 }

1188 case TargetOpcode::G_TRUNC:

1189 case TargetOpcode::G_FFLOOR:

1190 case TargetOpcode::G_FCEIL:

1191 case TargetOpcode::G_FRINT:

1192 case TargetOpcode::G_FNEARBYINT:

1193 case TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND:

1194 case TargetOpcode::G_INTRINSIC_ROUND: {

1195 Register Val = MI.getOperand(1).getReg();

1196 KnownFPClass KnownSrc;

1197 FPClassTest InterestedSrcs = InterestedClasses;

1202 computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc, Depth + 1);

1203

1204

1206

1208

1209

1210

1211

1216

1217 break;

1218 }

1219 case TargetOpcode::G_FEXP:

1220 case TargetOpcode::G_FEXP2:

1221 case TargetOpcode::G_FEXP10: {

1223 if ((InterestedClasses & fcNan) == fcNone)

1224 break;

1225

1226 Register Val = MI.getOperand(1).getReg();

1227 KnownFPClass KnownSrc;

1228 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,

1233 }

1234

1235 break;

1236 }

1237 case TargetOpcode::G_FLOG:

1238 case TargetOpcode::G_FLOG2:

1239 case TargetOpcode::G_FLOG10: {

1240

1241

1242

1243

1245 break;

1246

1247 FPClassTest InterestedSrcs = InterestedClasses;

1250 if ((InterestedClasses & fcNan) != fcNone)

1252

1253 Register Val = MI.getOperand(1).getReg();

1254 KnownFPClass KnownSrc;

1255 computeKnownFPClass(Val, DemandedElts, InterestedSrcs, KnownSrc, Depth + 1);

1256

1259

1262

1265 DenormalMode Mode = MF->getDenormalMode(FltSem);

1266

1269

1270 break;

1271 }

1272 case TargetOpcode::G_FPOWI: {

1274 break;

1275

1277 LLT ExpTy = MRI.getType(Exp);

1279 Exp, ExpTy.isVector() ? DemandedElts : APInt(1, 1), Depth + 1);

1280

1281 if (ExponentKnownBits.Zero[0]) {

1283 break;

1284 }

1285

1286

1287

1288

1289

1290

1291

1292

1293

1294 Register Val = MI.getOperand(1).getReg();

1295 KnownFPClass KnownSrc;

1296 computeKnownFPClass(Val, DemandedElts, fcNegative, KnownSrc, Depth + 1);

1299 break;

1300 }

1301 case TargetOpcode::G_FLDEXP:

1302 case TargetOpcode::G_STRICT_FLDEXP: {

1303 Register Val = MI.getOperand(1).getReg();

1304 KnownFPClass KnownSrc;

1305 computeKnownFPClass(Val, DemandedElts, InterestedClasses, KnownSrc,

1307 Known.propagateNaN(KnownSrc, true);

1308

1309

1314

1319

1320

1322 if ((InterestedClasses & ExpInfoMask) == fcNone)

1323 break;

1325 break;

1326

1327

1328

1329 break;

1330 }

1331 case TargetOpcode::G_INTRINSIC_ROUNDEVEN: {

1332 computeKnownFPClassForFPTrunc(MI, DemandedElts, InterestedClasses, Known,

1334 break;

1335 }

1336 case TargetOpcode::G_FADD:

1337 case TargetOpcode::G_STRICT_FADD:

1338 case TargetOpcode::G_FSUB:

1339 case TargetOpcode::G_STRICT_FSUB: {

1342 KnownFPClass KnownLHS, KnownRHS;

1343 bool WantNegative =

1344 (Opcode == TargetOpcode::G_FADD ||

1345 Opcode == TargetOpcode::G_STRICT_FADD) &&

1347 bool WantNaN = (InterestedClasses & fcNan) != fcNone;

1348 bool WantNegZero = (InterestedClasses & fcNegZero) != fcNone;

1349

1350 if (!WantNaN && !WantNegative && !WantNegZero)

1351 break;

1352

1353 FPClassTest InterestedSrcs = InterestedClasses;

1354 if (WantNegative)

1356 if (InterestedClasses & fcNan)

1357 InterestedSrcs |= fcInf;

1358 computeKnownFPClass(RHS, DemandedElts, InterestedSrcs, KnownRHS, Depth + 1);

1359

1362 WantNegZero ||

1363 (Opcode == TargetOpcode::G_FSUB ||

1364 Opcode == TargetOpcode::G_STRICT_FSUB)) {

1365

1366

1367

1368 computeKnownFPClass(LHS, DemandedElts, InterestedSrcs, KnownLHS,

1370

1371

1375

1376 if (Opcode == TargetOpcode::G_FADD ||

1377 Opcode == TargetOpcode::G_STRICT_FADD) {

1381

1382

1387

1390 } else {

1391

1396

1399 }

1400 }

1401

1402 break;

1403 }

1404 case TargetOpcode::G_FMUL:

1405 case TargetOpcode::G_STRICT_FMUL: {

1408

1411

1412 if ((InterestedClasses & fcNan) != fcNan)

1413 break;

1414

1415

1417

1418 KnownFPClass KnownLHS, KnownRHS;

1419 computeKnownFPClass(RHS, DemandedElts, NeedForNan, KnownRHS, Depth + 1);

1421 break;

1422

1423 computeKnownFPClass(LHS, DemandedElts, NeedForNan, KnownLHS, Depth + 1);

1425 break;

1426

1430 else

1432 }

1433

1434

1437 break;

1438 }

1439

1447

1448 break;

1449 }

1450 case TargetOpcode::G_FDIV:

1451 case TargetOpcode::G_FREM: {

1454

1456

1457 if (Opcode == TargetOpcode::G_FDIV) {

1458

1460 } else {

1461

1463 }

1464

1465 break;

1466 }

1467

1468 const bool WantNan = (InterestedClasses & fcNan) != fcNone;

1469 const bool WantNegative = (InterestedClasses & fcNegative) != fcNone;

1470 const bool WantPositive = Opcode == TargetOpcode::G_FREM &&

1472 if (!WantNan && !WantNegative && !WantPositive)

1473 break;

1474

1475 KnownFPClass KnownLHS, KnownRHS;

1476

1478 KnownRHS, Depth + 1);

1479

1480 bool KnowSomethingUseful =

1482

1483 if (KnowSomethingUseful || WantPositive) {

1487

1488 computeKnownFPClass(LHS, DemandedElts, InterestedClasses & InterestedLHS,

1489 KnownLHS, Depth + 1);

1490 }

1491

1492 if (Opcode == TargetOpcode::G_FDIV) {

1493

1502 }

1503

1504

1505

1509 } else {

1510

1516 }

1517

1518

1523

1524

1529 }

1530

1531 break;

1532 }

1533 case TargetOpcode::G_FPEXT: {

1534 Register Dst = MI.getOperand(0).getReg();

1535 Register Src = MI.getOperand(1).getReg();

1536

1537 computeKnownFPClass(R, DemandedElts, InterestedClasses, Known, Depth + 1);

1538

1541 LLT SrcTy = MRI.getType(Src).getScalarType();

1543

1544

1551 }

1552

1553

1555 Known.SignBit = std::nullopt;

1556 break;

1557 }

1558 case TargetOpcode::G_FPTRUNC: {

1559 computeKnownFPClassForFPTrunc(MI, DemandedElts, InterestedClasses, Known,

1561 break;

1562 }

1563 case TargetOpcode::G_SITOFP:

1564 case TargetOpcode::G_UITOFP: {

1565

1567

1568

1570

1571

1573 if (Opcode == TargetOpcode::G_UITOFP)

1575

1576 Register Val = MI.getOperand(1).getReg();

1577 LLT Ty = MRI.getType(Val);

1578

1579 if (InterestedClasses & fcInf) {

1580

1581

1582

1584 if (Opcode == TargetOpcode::G_SITOFP)

1585 --IntSize;

1586

1587

1588

1593 }

1594

1595 break;

1596 }

1597

1598 case TargetOpcode::G_BUILD_VECTOR:

1599 case TargetOpcode::G_CONCAT_VECTORS: {

1601

1603 break;

1604

1605 bool First = true;

1606 for (unsigned Idx = 0; Idx < Merge.getNumSources(); ++Idx) {

1607

1608 bool NeedsElt = DemandedElts[Idx];

1609

1610

1611 if (NeedsElt) {

1614 computeKnownFPClass(Src, Known, InterestedClasses, Depth + 1);

1616 } else {

1617 KnownFPClass Known2;

1618 computeKnownFPClass(Src, Known2, InterestedClasses, Depth + 1);

1619 Known |= Known2;

1620 }

1621

1622

1624 break;

1625 }

1626 }

1627

1628 break;

1629 }

1630 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {

1631

1632

1633

1637

1639

1640 LLT VecTy = MRI.getType(Vec);

1641

1645 if (CIdx && CIdx->ult(NumElts))

1647 return computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known,

1649 }

1650

1651 break;

1652 }

1653 case TargetOpcode::G_INSERT_VECTOR_ELT: {

1658

1659 LLT VecTy = MRI.getType(Vec);

1660

1662 return;

1663

1665

1666 unsigned NumElts = DemandedElts.getBitWidth();

1667 APInt DemandedVecElts = DemandedElts;

1668 bool NeedsElt = true;

1669

1670 if (CIdx && CIdx->ult(NumElts)) {

1671 DemandedVecElts.clearBit(CIdx->getZExtValue());

1672 NeedsElt = DemandedElts[CIdx->getZExtValue()];

1673 }

1674

1675

1676 if (NeedsElt) {

1677 computeKnownFPClass(Elt, Known, InterestedClasses, Depth + 1);

1678

1680 break;

1681 } else {

1683 }

1684

1685

1686 if (!DemandedVecElts.isZero()) {

1687 KnownFPClass Known2;

1688 computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2,

1690 Known |= Known2;

1691 }

1692

1693 break;

1694 }

1695 case TargetOpcode::G_SHUFFLE_VECTOR: {

1696

1697

1699 APInt DemandedLHS, DemandedRHS;

1701 assert(DemandedElts == APInt(1, 1));

1702 DemandedLHS = DemandedRHS = DemandedElts;

1703 } else {

1705 DemandedElts, DemandedLHS,

1706 DemandedRHS)) {

1708 return;

1709 }

1710 }

1711

1712 if (!!DemandedLHS) {

1714 computeKnownFPClass(LHS, DemandedLHS, InterestedClasses, Known,

1716

1717

1719 break;

1720 } else {

1722 }

1723

1724 if (!!DemandedRHS) {

1725 KnownFPClass Known2;

1727 computeKnownFPClass(RHS, DemandedRHS, InterestedClasses, Known2,

1729 Known |= Known2;

1730 }

1731 break;

1732 }

1733 case TargetOpcode::COPY: {

1734 Register Src = MI.getOperand(1).getReg();

1735

1736 if (!Src.isVirtual())

1737 return;

1738

1739 computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known, Depth + 1);

1740 break;

1741 }

1742 }

1743}

1744

1746GISelValueTracking::computeKnownFPClass(Register R, const APInt &DemandedElts,

1748 unsigned Depth) {

1750 computeKnownFPClass(R, DemandedElts, InterestedClasses, KnownClasses, Depth);

1751 return KnownClasses;

1752}

1753

1757 computeKnownFPClass(R, Known, InterestedClasses, Depth);

1758 return Known;

1759}

1760

1765 InterestedClasses &= ~fcNan;

1767 InterestedClasses &= ~fcInf;

1768

1770 computeKnownFPClass(R, DemandedElts, InterestedClasses, Depth);

1771

1773 Result.KnownFPClasses &= ~fcNan;

1775 Result.KnownFPClasses &= ~fcInf;

1776 return Result;

1777}

1778

1781 LLT Ty = MRI.getType(R);

1782 APInt DemandedElts =

1784 return computeKnownFPClass(R, DemandedElts, Flags, InterestedClasses, Depth);

1785}

1786

1787
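// Merge the sign-bit counts of two possible sources (e.g. the two arms of a
// G_SELECT): only the smaller of the two counts can be relied upon.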

1788unsigned GISelValueTracking::computeNumSignBitsMin(Register Src0, Register Src1,

1789 const APInt &DemandedElts,

1790 unsigned Depth) {

1791

1793 if (Src1SignBits == 1)

1794 return 1;

1796}

1797

1798

1799

1800

1802 unsigned TyBits) {

1804 if (!Ranges)

1805 return 1;

1806

1810 case TargetOpcode::G_SEXTLOAD:

1812 break;

1813 case TargetOpcode::G_ZEXTLOAD:

1815 break;

1816 default:

1817 break;

1818 }

1819 }

1820

1823}

1824
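// computeNumSignBits returns a conservative lower bound (at least 1) on the
// number of high bits of R that are known to equal its sign bit, recursing
// through the defining instructions of the demanded elements.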

1826 const APInt &DemandedElts,

1827 unsigned Depth) {

1829 unsigned Opcode = MI.getOpcode();

1830

1831 if (Opcode == TargetOpcode::G_CONSTANT)

1832 return MI.getOperand(1).getCImm()->getValue().getNumSignBits();

1833

1835 return 1;

1836

1837 if (!DemandedElts)

1838 return 1;

1839

1840 LLT DstTy = MRI.getType(R);

1842

1843

1844

1845

1846

1848 return 1;

1849

1850 unsigned FirstAnswer = 1;

1851 switch (Opcode) {

1852 case TargetOpcode::COPY: {

1854 if (Src.getReg().isVirtual() && Src.getSubReg() == 0 &&

1855 MRI.getType(Src.getReg()).isValid()) {

1856

1858 }

1859

1860 return 1;

1861 }

1862 case TargetOpcode::G_SEXT: {

1863 Register Src = MI.getOperand(1).getReg();

1864 LLT SrcTy = MRI.getType(Src);

1865 unsigned Tmp = DstTy.getScalarSizeInBits() - SrcTy.getScalarSizeInBits();

1867 }

1868 case TargetOpcode::G_ASSERT_SEXT:

1869 case TargetOpcode::G_SEXT_INREG: {

1870

1871 Register Src = MI.getOperand(1).getReg();

1872 unsigned SrcBits = MI.getOperand(2).getImm();

1873 unsigned InRegBits = TyBits - SrcBits + 1;

1875 InRegBits);

1876 }

1877 case TargetOpcode::G_LOAD: {

1879 if (DemandedElts != 1 || getDataLayout().isLittleEndian())

1880 break;

1881

1883 }

1884 case TargetOpcode::G_SEXTLOAD: {

1886

1887

1889 return 1;

1890

1892 if (NumBits != 1)

1893 return NumBits;

1894

1895

1898 }

1899 case TargetOpcode::G_ZEXTLOAD: {

1901

1902

1904 return 1;

1905

1907 if (NumBits != 1)

1908 return NumBits;

1909

1910

1913 }

1914 case TargetOpcode::G_AND:

1915 case TargetOpcode::G_OR:

1916 case TargetOpcode::G_XOR: {

1917 Register Src1 = MI.getOperand(1).getReg();

1918 unsigned Src1NumSignBits =

1920 if (Src1NumSignBits != 1) {

1921 Register Src2 = MI.getOperand(2).getReg();

1922 unsigned Src2NumSignBits =

1924 FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits);

1925 }

1926 break;

1927 }

1928 case TargetOpcode::G_ASHR: {

1929 Register Src1 = MI.getOperand(1).getReg();

1930 Register Src2 = MI.getOperand(2).getReg();

1933 FirstAnswer = std::min<uint64_t>(FirstAnswer + *C, TyBits);

1934 break;

1935 }

1936 case TargetOpcode::G_SHL: {

1937 Register Src1 = MI.getOperand(1).getReg();

1938 Register Src2 = MI.getOperand(2).getReg();

1939 if (std::optional<ConstantRange> ShAmtRange =

1941 uint64_t MaxShAmt = ShAmtRange->getUnsignedMax().getZExtValue();

1942 uint64_t MinShAmt = ShAmtRange->getUnsignedMin().getZExtValue();

1943

1944 MachineInstr &ExtMI = *MRI.getVRegDef(Src1);

1945 unsigned ExtOpc = ExtMI.getOpcode();

1946

1947

1948

1949

1950

1951

1952 if (ExtOpc == TargetOpcode::G_SEXT || ExtOpc == TargetOpcode::G_ZEXT ||

1953 ExtOpc == TargetOpcode::G_ANYEXT) {

1954 LLT ExtTy = MRI.getType(Src1);

1956 LLT ExtendeeTy = MRI.getType(Extendee);

1959

1960 if (SizeDiff <= MinShAmt) {

1961 unsigned Tmp =

1963 if (MaxShAmt < Tmp)

1964 return Tmp - MaxShAmt;

1965 }

1966 }

1967

1969 if (MaxShAmt < Tmp)

1970 return Tmp - MaxShAmt;

1971 }

1972 break;

1973 }

1974 case TargetOpcode::G_TRUNC: {

1975 Register Src = MI.getOperand(1).getReg();

1976 LLT SrcTy = MRI.getType(Src);

1977

1978

1980 unsigned NumSrcBits = SrcTy.getScalarSizeInBits();

1982 if (NumSrcSignBits > (NumSrcBits - DstTyBits))

1983 return NumSrcSignBits - (NumSrcBits - DstTyBits);

1984 break;

1985 }

1986 case TargetOpcode::G_SELECT: {

1987 return computeNumSignBitsMin(MI.getOperand(2).getReg(),

1988 MI.getOperand(3).getReg(), DemandedElts,

1990 }

1991 case TargetOpcode::G_SMIN:

1992 case TargetOpcode::G_SMAX:

1993 case TargetOpcode::G_UMIN:

1994 case TargetOpcode::G_UMAX:

1995

1996 return computeNumSignBitsMin(MI.getOperand(1).getReg(),

1997 MI.getOperand(2).getReg(), DemandedElts,

1999 case TargetOpcode::G_SADDO:

2000 case TargetOpcode::G_SADDE:

2001 case TargetOpcode::G_UADDO:

2002 case TargetOpcode::G_UADDE:

2003 case TargetOpcode::G_SSUBO:

2004 case TargetOpcode::G_SSUBE:

2005 case TargetOpcode::G_USUBO:

2006 case TargetOpcode::G_USUBE:

2007 case TargetOpcode::G_SMULO:

2008 case TargetOpcode::G_UMULO: {

2009

2010

2011

2012 if (MI.getOperand(1).getReg() == R) {

2013 if (TL.getBooleanContents(DstTy.isVector(), false) ==

2015 return TyBits;

2016 }

2017

2018 break;

2019 }

2020 case TargetOpcode::G_SUB: {

2021 Register Src2 = MI.getOperand(2).getReg();

2022 unsigned Src2NumSignBits =

2024 if (Src2NumSignBits == 1)

2025 return 1;

2026

2027

2028 Register Src1 = MI.getOperand(1).getReg();

2030 if (Known1.isZero()) {

2032

2033

2034 if ((Known2.Zero | 1).isAllOnes())

2035 return TyBits;

2036

2037

2038

2039

2041 FirstAnswer = Src2NumSignBits;

2042 break;

2043 }

2044

2045

2046 }

2047

2048 unsigned Src1NumSignBits =

2050 if (Src1NumSignBits == 1)

2051 return 1;

2052

2053

2054

2055 FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits) - 1;

2056 break;

2057 }

2058 case TargetOpcode::G_ADD: {

2059 Register Src2 = MI.getOperand(2).getReg();

2060 unsigned Src2NumSignBits =

2062 if (Src2NumSignBits <= 2)

2063 return 1;

2064

2065 Register Src1 = MI.getOperand(1).getReg();

2066 unsigned Src1NumSignBits =

2068 if (Src1NumSignBits == 1)

2069 return 1;

2070

2071

2075

2076

2077 if ((Known1.Zero | 1).isAllOnes())

2078 return TyBits;

2079

2080

2081

2083 FirstAnswer = Src1NumSignBits;

2084 break;

2085 }

2086

2087

2088 }

2089

2090

2091

2092 FirstAnswer = std::min(Src1NumSignBits, Src2NumSignBits) - 1;

2093 break;

2094 }

2095 case TargetOpcode::G_FCMP:

2096 case TargetOpcode::G_ICMP: {

2097 bool IsFP = Opcode == TargetOpcode::G_FCMP;

2098 if (TyBits == 1)

2099 break;

2100 auto BC = TL.getBooleanContents(DstTy.isVector(), IsFP);

2102 return TyBits;

2104 return TyBits - 1;

2105 break;

2106 }

2107 case TargetOpcode::G_BUILD_VECTOR: {

2108

2109 FirstAnswer = TyBits;

2110 APInt SingleDemandedElt(1, 1);

2112 if (!DemandedElts[I])

2113 continue;

2114

2115 unsigned Tmp2 =

2117 FirstAnswer = std::min(FirstAnswer, Tmp2);

2118

2119

2120 if (FirstAnswer == 1)

2121 break;

2122 }

2123 break;

2124 }

2125 case TargetOpcode::G_CONCAT_VECTORS: {

2126 if (MRI.getType(MI.getOperand(0).getReg()).isScalableVector())

2127 break;

2128 FirstAnswer = TyBits;

2129

2130

2131 unsigned NumSubVectorElts =

2132 MRI.getType(MI.getOperand(1).getReg()).getNumElements();

2134 APInt DemandedSub =

2135 DemandedElts.extractBits(NumSubVectorElts, I * NumSubVectorElts);

2136 if (!DemandedSub)

2137 continue;

2139

2140 FirstAnswer = std::min(FirstAnswer, Tmp2);

2141

2142

2143 if (FirstAnswer == 1)

2144 break;

2145 }

2146 break;

2147 }

2148 case TargetOpcode::G_SHUFFLE_VECTOR: {

2149

2150

2151 APInt DemandedLHS, DemandedRHS;

2152 Register Src1 = MI.getOperand(1).getReg();

2153 unsigned NumElts = MRI.getType(Src1).getNumElements();

2155 DemandedElts, DemandedLHS, DemandedRHS))

2156 return 1;

2157

2158 if (!!DemandedLHS)

2160

2161 if (FirstAnswer == 1)

2162 break;

2163 if (!!DemandedRHS) {

2164 unsigned Tmp2 =

2166 FirstAnswer = std::min(FirstAnswer, Tmp2);

2167 }

2168 break;

2169 }

2170 case TargetOpcode::G_SPLAT_VECTOR: {

2171

2172 Register Src = MI.getOperand(1).getReg();

2174 unsigned NumSrcBits = MRI.getType(Src).getSizeInBits();

2175 if (NumSrcSignBits > (NumSrcBits - TyBits))

2176 return NumSrcSignBits - (NumSrcBits - TyBits);

2177 break;

2178 }

2179 case TargetOpcode::G_INTRINSIC:

2180 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:

2181 case TargetOpcode::G_INTRINSIC_CONVERGENT:

2182 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:

2183 default: {

2184 unsigned NumBits =

2185 TL.computeNumSignBitsForTargetInstr(*this, R, DemandedElts, MRI, Depth);

2186 if (NumBits > 1)

2187 FirstAnswer = std::max(FirstAnswer, NumBits);

2188 break;

2189 }

2190 }

2191

2192

2193

2197 Mask = Known.Zero;

2198 } else if (Known.isNegative()) {

2199 Mask = Known.One;

2200 } else {

2201

2202 return FirstAnswer;

2203 }

2204

2205

2206

2207 Mask <<= Mask.getBitWidth() - TyBits;

2208 return std::max(FirstAnswer, Mask.countl_one());

2209}

2210

2212 LLT Ty = MRI.getType(R);

2213 APInt DemandedElts =

2216}
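// Shift-amount helpers: getValidShiftAmountRange returns the range of
// possible shift amounts for the shift operand R (an exact value for a
// G_CONSTANT, a [min, max] range over the demanded lanes of a
// G_BUILD_VECTOR of constants), or std::nullopt if an amount could be out of
// range for the type; getValidMinimumShiftAmount simply takes the unsigned
// minimum of that range.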

2217

2220

2222 unsigned Opcode = MI.getOpcode();

2223

2224 LLT Ty = MRI.getType(R);

2225 unsigned BitWidth = Ty.getScalarSizeInBits();

2226

2227 if (Opcode == TargetOpcode::G_CONSTANT) {

2228 const APInt &ShAmt = MI.getOperand(1).getCImm()->getValue();

2230 return std::nullopt;

2232 }

2233

2234 if (Opcode == TargetOpcode::G_BUILD_VECTOR) {

2235 const APInt *MinAmt = nullptr, *MaxAmt = nullptr;

2236 for (unsigned I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {

2237 if (!DemandedElts[I])

2238 continue;

2239 MachineInstr *Op = MRI.getVRegDef(MI.getOperand(I + 1).getReg());

2240 if (Op->getOpcode() != TargetOpcode::G_CONSTANT) {

2241 MinAmt = MaxAmt = nullptr;

2242 break;

2243 }

2244

2245 const APInt &ShAmt = Op->getOperand(1).getCImm()->getValue();

2247 return std::nullopt;

2248 if (!MinAmt || MinAmt->ugt(ShAmt))

2249 MinAmt = &ShAmt;

2250 if (!MaxAmt || MaxAmt->ult(ShAmt))

2251 MaxAmt = &ShAmt;

2252 }

2253 assert(((!MinAmt && !MaxAmt) || (MinAmt && MaxAmt)) &&

2254 "Failed to find matching min/max shift amounts");

2255 if (MinAmt && MaxAmt)

2257 }

2258

2259

2260

2264

2265 return std::nullopt;

2266}

2267

2270 if (std::optional<ConstantRange> AmtRange =

2272 return AmtRange->getUnsignedMin().getZExtValue();

2273 return std::nullopt;

2274}

2275

2281

2286

2288 if (!Info) {

2289 unsigned MaxDepth =

2291 Info = std::make_unique<GISelValueTracking>(MF, MaxDepth);

2292 }

2293 return *Info;

2294}

2295

2296AnalysisKey GISelValueTrackingAnalysis::Key;

2297

2303

2309 OS << "name: ";

2311 OS << '\n';

2312

2316 if (!MO.isReg() || MO.getReg().isPhysical())

2317 continue;

2319 if (!MRI.getType(Reg).isValid())

2320 continue;

2321 KnownBits Known = VTA.getKnownBits(Reg);

2322 unsigned SignedBits = VTA.computeNumSignBits(Reg);

2323 OS << " " << MO << " KnownBits:" << Known << " SignBits:" << SignedBits

2324 << '\n';

2325 };

2326 }

2327 }

2329}
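As a rough usage sketch (not reproduced from the file above): a legacy-pass-manager GlobalISel pass typically obtains the analysis through its wrapper pass and then queries individual virtual registers. The pass name MyGISelPass is hypothetical, and the GISelValueTrackingAnalysisLegacy wrapper with its get(MF) accessor is an assumption based on the pass machinery in this file rather than text from this page:

  // Hypothetical consumer of the analysis (sketch only; names assumed).
  bool MyGISelPass::runOnMachineFunction(MachineFunction &MF) {
    GISelValueTracking &VT =
        getAnalysis<GISelValueTrackingAnalysisLegacy>().get(MF);
    MachineRegisterInfo &MRI = MF.getRegInfo();
    for (MachineBasicBlock &MBB : MF)
      for (MachineInstr &MI : MBB)
        for (MachineOperand &MO : MI.operands())
          if (MO.isReg() && MO.getReg().isVirtual() &&
              MRI.getType(MO.getReg()).isValid()) {
            KnownBits Known = VT.getKnownBits(MO.getReg());
            (void)Known; // e.g. feed a combine that relies on known-zero bits
          }
    return false;
  }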
