LLVM: include/llvm/CodeGen/BasicTTIImpl.h Source File

1//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//

2//

3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.

4// See https://llvm.org/LICENSE.txt for license information.

5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

6//

7//===----------------------------------------------------------------------===//

8//

9/// \file

10/// This file provides a helper that implements much of the TTI interface in

11/// terms of the target-independent code generator and TargetLowering.

12///

13//===----------------------------------------------------------------------===//

14

15

16#ifndef LLVM_CODEGEN_BASICTTIIMPL_H

17#define LLVM_CODEGEN_BASICTTIIMPL_H

18

54#include &lt;algorithm&gt;

55#include &lt;cassert&gt;

56#include &lt;cstdint&gt;

57#include &lt;limits&gt;

58#include &lt;optional&gt;

59#include &lt;utility&gt;

60

61namespace llvm {

62

69

71

72

73

74

75

76

77

78

79

80
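// BasicTTIImplBase<T> is a CRTP mixin: a target's TTI implementation derives
// as `class FooTTIImpl : public BasicTTIImplBase<FooTTIImpl>`, and thisT()
// below recovers the derived type so each cost query dispatches statically to
// the target's override (when one exists) without virtual-call overhead.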

81template &lt;typename T&gt;

83private:

86

87

88 const T *thisT() const { return static_cast<const T *>(this); }

89

90

91

96

97
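// Broadcast: one extract of source lane 0, then an insert into every
// destination lane.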

98 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,

99 CostKind, 0, nullptr, nullptr);

100

101 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {

102 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,

103 CostKind, i, nullptr, nullptr);

104 }

106 }

107

108

109

114

115

116

117

118

119

120
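// Generic permute, modeled pessimistically: one extract/insert pair per
// destination element.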

121 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {

122 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,

123 CostKind, i, nullptr, nullptr);

124 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,

125 CostKind, i, nullptr, nullptr);

126 }

128 }

129

130

131

134 int Index,

136 assert(VTy && SubVTy &&

137 "Can only extract subvectors from vectors");

140 (Index + NumSubElts) <=

142 "SK_ExtractSubvector index out of range");

143

145

146

147
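// Subvector extraction as scalar moves: read lane Index+i of the source,
// write lane i of the subvector.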

148 for (int i = 0; i != NumSubElts; ++i) {

150 thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,

151 CostKind, i + Index, nullptr, nullptr);

152 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy,

153 CostKind, i, nullptr, nullptr);

154 }

156 }

157

158

159

162 int Index,

164 assert(VTy && SubVTy &&

165 "Can only insert subvectors into vectors");

168 (Index + NumSubElts) <=

170 "SK_InsertSubvector index out of range");

171

173

174

175

176 for (int i = 0; i != NumSubElts; ++i) {

177 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy,

178 CostKind, i, nullptr, nullptr);

180 thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, CostKind,

181 i + Index, nullptr, nullptr);

182 }

184 }

185

186

188 return static_cast<const T *>(this)->getST();

189 }

190

191

193 return static_cast<const T *>(this)->getTLI();

194 }

195

197 switch (M) {

208 }

210 }

211
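// Shared fallback for masked loads/stores, gathers/scatters and strided ops.
// The total returned below is AddrExtractCost (pulling a lane out of the
// pointer vector, gather/scatter only) + VF scalar memory ops + PackingCost
// (inserting/extracting the data lanes) + ConditionalCost (a branch and a
// PHI per lane when the mask is not a compile-time constant).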

212 InstructionCost getCommonMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,

214 bool VariableMask,

215 bool IsGatherScatter,

218

221

223 unsigned VF = VT->getNumElements();

224

225

226

227

228

233 false, true, CostKind)

234 : 0;

235

236

238 VF * thisT()->getMemoryOpCost(Opcode, VT->getElementType(), Alignment,

240

241

244 Opcode == Instruction::Store, CostKind);

245

247 if (VariableMask) {

248

249

250

251

252

253

254 ConditionalCost =

257 false, true, CostKind) +

258 VF * (thisT()->getCFInstrCost(Instruction::Br, CostKind) +

259 thisT()->getCFInstrCost(Instruction::PHI, CostKind));

260 }

261

262 return AddrExtractCost + MemoryOpCost + PackingCost + ConditionalCost;

263 }

264

265

266

267

268

269
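// Returns true if Mask broadcasts a single source lane into every defined
// element, and reports that lane in Index.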

270 static bool isSplatMask(ArrayRef&lt;int&gt; Mask, unsigned NumSrcElts, int &Index) {

271

272 bool IsCompared = false;

276 return P.index() != Mask.size() - 1 || IsCompared;

277 if (static_cast<unsigned>(P.value()) >= NumSrcElts * 2)

278 return false;

280 SplatIdx = P.value();

281 return P.index() != Mask.size() - 1;

282 }

283 IsCompared = true;

284 return SplatIdx == P.value();

285 })) {

286 Index = SplatIdx;

287 return true;

288 }

289 return false;

290 }

291

292

293

294

295

296

297

298

299

300

301

302
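// Multi-result math intrinsics (modf, sincos, sincospi) may lower to a
// vector libcall. When the target provides one, the cost is the call itself
// plus the loads needed to read back results the callee returns through
// memory; CallRetElementIndex names the result returned by value, which
// needs no load.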

303 std::optional&lt;InstructionCost&gt; getMultipleResultIntrinsicVectorLibCallCost(

305 std::optional&lt;unsigned&gt; CallRetElementIndex = {}) const {

307

308 auto const *LibInfo = ICA.getLibInfo();

311 return std::nullopt;

312

314 EVT VT = getTLI()->getValueType(DL, Ty);

315

316 RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;

317

318 switch (ICA.getID()) {

319 case Intrinsic::modf:

321 break;

322 case Intrinsic::sincospi:

324 break;

325 case Intrinsic::sincos:

327 break;

328 default:

329 return std::nullopt;

330 }

331

332

333 RTLIB::LibcallImpl LibcallImpl = getTLI()->getLibcallImpl(LC);

334 if (LibcallImpl == RTLIB::Unsupported)

335 return std::nullopt;

336

338

339

342

347 VecTy, {}, CostKind, 0, nullptr, {});

348 }

349

350

351

353 if (Idx == CallRetElementIndex)

354 continue;

355 Cost += thisT()->getMemoryOpCost(

356 Instruction::Load, VectorTy,

358 }

360 }

361

362

363

371 continue;

373 }

374 return FilteredTys;

375 }

376

377protected:

381

383

384public:

385

386

389 unsigned *Fast) const override {

391 return getTLI()->allowsMisalignedMemoryAccesses(

393 }

394

396 const Function *Callee) const override {

397 const TargetMachine &TM = getTLI()->getTargetMachine();

398

403

404

405

406 return (CallerBits & CalleeBits) == CalleeBits;

407 }

408

412

414

416

420

424

429

434

436 return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);

437 }

438

440 return getTLI()->getTargetMachine().getAssumedAddrSpace(V);

441 }

442

444 return getTLI()->getTargetMachine().Options.ThreadModel ==

446 }

447

448 std::pair<const Value *, unsigned>

450 return getTLI()->getTargetMachine().getPredicatedAddrSpace(V);

451 }

452

454 Value *NewV) const override {

455 return nullptr;

456 }

457

459 return getTLI()->isLegalAddImmediate(imm);

460 }

461

463 return getTLI()->isLegalAddScalableImmediate(Imm);

464 }

465

467 return getTLI()->isLegalICmpImmediate(imm);

468 }

469

471 bool HasBaseReg, int64_t Scale, unsigned AddrSpace,

473 int64_t ScalableOffset = 0) const override {

478 AM.Scale = Scale;

480 return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);

481 }

482

484 return getTLI()->getPreferredLargeGEPBaseOffset(MinOffset, MaxOffset);

485 }

486

488 Type *ScalarValTy) const override {

489 auto &&IsSupportedByTarget = [this, ScalarMemTy, ScalarValTy](unsigned VF) {

491 EVT VT = getTLI()->getValueType(DL, SrcTy);

492 if (getTLI()->isOperationLegal(ISD::STORE, VT) ||

493 getTLI()->isOperationCustom(ISD::STORE, VT))

494 return true;

495

496 EVT ValVT =

498 EVT LegalizedVT =

499 getTLI()->getTypeToTransformTo(ScalarMemTy->getContext(), VT);

500 return getTLI()->isTruncStoreLegal(LegalizedVT, ValVT);

501 };

502 while (VF > 2 && IsSupportedByTarget(VF))

503 VF /= 2;

504 return VF;

505 }

506

508 EVT VT = getTLI()->getValueType(DL, Ty, true);

509 return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);

510 }

511

513 EVT VT = getTLI()->getValueType(DL, Ty, true);

514 return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);

515 }

516

521

525

529

533

536 int64_t Scale,

537 unsigned AddrSpace) const override {

542 AM.Scale = Scale;

545 return 0;

547 }

548

550 return getTLI()->isTruncateFree(Ty1, Ty2);

551 }

552

554 return getTLI()->isProfitableToHoist(I);

555 }

556

557 bool useAA() const override { return getST()->useAA(); }

558

560 EVT VT = getTLI()->getValueType(DL, Ty, true);

561 return getTLI()->isTypeLegal(VT);

562 }

563

565 EVT ETy = getTLI()->getValueType(DL, Ty);

566 return getTLI()->getNumRegisters(Ty->getContext(), ETy);

567 }

568

574

578

579

580

581

582

583

584
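// Cluster estimate for switch lowering: a case range that is dense enough to
// become a jump table counts as a single cluster (with JumpTableSize set to
// the covered range); otherwise each case is assumed to lower to its own
// compare/branch cluster, so N is returned.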

585 unsigned N = SI.getNumCases();

588

589 JumpTableSize = 0;

590 bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());

591

592

593 if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))

594 return N;

595

596 APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();

597 APInt MinCaseVal = MaxCaseVal;

598 for (auto CI : SI.cases()) {

599 const APInt &CaseVal = CI.getCaseValue()->getValue();

600 if (CaseVal.sgt(MaxCaseVal))

601 MaxCaseVal = CaseVal;

602 if (CaseVal.slt(MinCaseVal))

603 MinCaseVal = CaseVal;

604 }

605

606

607 if (N <= DL.getIndexSizeInBits(0u)) {

609 for (auto I : SI.cases()) {

610 const BasicBlock *BB = I.getCaseSuccessor();

611 ++DestMap[BB];

612 }

613

615 return 1;

616 }

617

618

619 if (IsJTAllowed) {

620 if (N < 2 || N < TLI->getMinimumJumpTableEntries())

621 return N;

623 (MaxCaseVal - MinCaseVal)

624 .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;

625

627 JumpTableSize = Range;

628 return 1;

629 }

630 }

631 return N;

632 }

633

639

641 const TargetMachine &TM = getTLI()->getTargetMachine();

642

644 return false;

645

646

647

648

651 return false;

652

655 return false;

656

657

658

660 return false;

661

662 return true;

663 }

664

671

673

683

685 const Function &Fn) const override {

687 default:

688 break;

689 case Instruction::SDiv:

690 case Instruction::SRem:

691 case Instruction::UDiv:

692 case Instruction::URem: {

694 return false;

695 EVT VT = getTLI()->getValueType(DL, Inst.getType());

696 return !getTLI()->isIntDivCheap(VT, Fn.getAttributes());

697 }

698 };

699

700 return false;

701 }

702

708 const AllocaInst *AI) const override {

709 return 0;

710 }

711

713

717

718

719

720

721

722

723

724

725

726

727

728

729

730

731

732

733

734

735

736

737

738

739

740 unsigned MaxOps;

744 else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)

745 MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;

746 else

747 return;

748

749

755 continue;

756 }

757

758 if (ORE) {

759 ORE->emit([&]() {

761 L->getHeader())

762 << "advising against unrolling the loop because it "

763 "contains a "

765 });

766 }

767 return;

768 }

769 }

770 }

771

772

773

776

777

780

781

782

784 }

785

793

799

803

807

812

813 std::optional<Instruction *>

817

818 std::optional<Value *>

821 bool &KnownBitsComputed) const override {

823 KnownBitsComputed);

824 }

825

828 APInt &UndefElts2, APInt &UndefElts3,

830 SimplifyAndSetOp) const override {

832 IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,

833 SimplifyAndSetOp);

834 }

835

836 std::optional&lt;unsigned&gt;

838 return std::optional&lt;unsigned&gt;(

839 getST()->getCacheSize(static_cast<unsigned>(Level)));

840 }

841

842 std::optional&lt;unsigned&gt;

844 std::optional&lt;unsigned&gt; TargetResult =

845 getST()->getCacheAssociativity(static_cast<unsigned>(Level));

846

847 if (TargetResult)

848 return TargetResult;

849

851 }

852

854 return getST()->getCacheLineSize();

855 }

856

858 return getST()->getPrefetchDistance();

859 }

860

862 unsigned NumStridedMemAccesses,

863 unsigned NumPrefetches,

864 bool HasCall) const override {

865 return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,

866 NumPrefetches, HasCall);

867 }

868

870 return getST()->getMaxPrefetchIterationsAhead();

871 }

872

874 return getST()->enableWritePrefetching();

875 }

876

878 return getST()->shouldPrefetchAddressSpace(AS);

879 }

880

881

882

883

884

885

890

891 std::optional&lt;unsigned&gt; getMaxVScale() const override { return std::nullopt; }

893 return std::nullopt;

894 }

896

897

898

899
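// Scalarization overhead: for every demanded lane, charge an InsertElement
// when the scalarized result must be rebuilt into a vector (Insert) and an
// ExtractElement when vector operands must be broken apart (Extract).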

901 VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract,

904

905

909

911 (VL.empty() || VL.size() == Ty->getNumElements()) &&

912 "Vector size mismatch");

913

915

916 for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {

917 if (!DemandedElts[i])

918 continue;

919 if (Insert) {

920 Value *InsertedVal = VL.empty() ? nullptr : VL[i];

921 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty,

922 CostKind, i, nullptr, InsertedVal);

923 }

924 if (Extract)

925 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,

926 CostKind, i, nullptr, nullptr);

927 }

928

930 }

931

935

936 bool

938 unsigned ScalarOpdIdx) const override {

939 return false;

940 }

941

943 int OpdIdx) const override {

944 return OpdIdx == -1;

945 }

946

947 bool

949 int RetIdx) const override {

950 return RetIdx == 0;

951 }

952

953

955 bool Extract,

960

962 return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,

964 }

965

966

967

968

972 for (Type *Ty : Tys) {

973

974 if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&

975 !Ty->isPtrOrPtrVectorTy())

976 continue;

977

981 }

982

984 }

985

986

987

988

989

995 RetTy, true, false, CostKind);

996 if (!Args.empty())

998 filterConstantAndDuplicatedOperands(Args, Tys), CostKind);

999 else

1000

1001

1003 true, CostKind);

1004

1005 return Cost;

1006 }

1007

1008

1011 EVT MTy = getTLI()->getValueType(DL, Ty);

1012

1014

1015

1016

1017 while (true) {

1019

1021

1022

1025 }

1026

1029

1033

1034

1035 if (MTy == LK.second)

1037

1038

1039 MTy = LK.second;

1040 }

1041 }

1042
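// Generic arithmetic costing: legalize the type, then scale a unit cost by
// LT.first, the number of registers the legalized value splits into.
// Legal/promoted ops cost LT.first * OpCost, custom-lowered ops twice that,
// and SREM/UREM that the target would expand are re-costed as div + mul + sub.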

1044

1050 const Instruction *CxtI = nullptr) const override {

1051

1052 const TargetLoweringBase *TLI = getTLI();

1053 int ISD = TLI->InstructionOpcodeToISD(Opcode);

1054 assert(ISD && "Invalid opcode");

1055

1056

1059 Opd1Info, Opd2Info,

1060 Args, CxtI);

1061

1063

1065

1066

1068

1069 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {

1070

1071

1072 return LT.first * OpCost;

1073 }

1074

1075 if (!TLI->isOperationExpand(ISD, LT.second)) {

1076

1077

1078 return LT.first * 2 * OpCost;

1079 }

1080

1081

1082

1083

1085 bool IsSigned = ISD == ISD::SREM;

1087 LT.second) ||

1089 LT.second)) {

1090 unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;

1091 InstructionCost DivCost = thisT()->getArithmeticInstrCost(

1092 DivOpc, Ty, CostKind, Opd1Info, Opd2Info);

1094 thisT()->getArithmeticInstrCost(Instruction::Mul, Ty, CostKind);

1096 thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);

1097 return DivCost + MulCost + SubCost;

1098 }

1099 }

1100

1101

1104

1105

1106

1107

1111 Args, CxtI);

1112

1113

1117 }

1118

1119

1120 return OpCost;

1121 }

1122

1127 if (Mask.empty())

1128 return Kind;

1129 int NumDstElts = Mask.size();

1130 int NumSrcElts = SrcTy->getElementCount().getKnownMinValue();

1131 switch (Kind) {

1137 if (isSplatMask(Mask, NumSrcElts, Index))

1140 (Index + NumDstElts) <= NumSrcElts) {

1143 }

1144 break;

1145 }

1147 if (all_of(Mask, [NumSrcElts](int M) { return M < NumSrcElts; }))

1149 Index, SubTy);

1150 int NumSubElts;

1152 Mask, NumSrcElts, NumSubElts, Index)) {

1153 if (Index + NumSubElts > NumSrcElts)

1154 return Kind;

1157 }

1164 break;

1165 }

1173 break;

1174 }

1175 return Kind;

1176 }

1177

1182 const Instruction *CxtI = nullptr) const override {

1186 return getBroadcastShuffleOverhead(FVT, CostKind);

1195 return getPermuteShuffleOverhead(FVT, CostKind);

1198 return getExtractSubvectorOverhead(SrcTy, CostKind, Index,

1201 return getInsertSubvectorOverhead(DstTy, CostKind, Index,

1203 }

1205 }

1206

1210 const Instruction *I = nullptr) const override {

1212 return 0;

1213

1216 assert(ISD && "Invalid opcode");

1219

1220 TypeSize SrcSize = SrcLT.second.getSizeInBits();

1221 TypeSize DstSize = DstLT.second.getSizeInBits();

1222 bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();

1223 bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();

1224

1225 switch (Opcode) {

1226 default:

1227 break;

1228 case Instruction::Trunc:

1229

1230 if (TLI->isTruncateFree(SrcLT.second, DstLT.second))

1231 return 0;

1232 [[fallthrough]];

1233 case Instruction::BitCast:

1234

1235

1236 if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&

1237 SrcSize == DstSize)

1238 return 0;

1239 break;

1240 case Instruction::FPExt:

1241 if (I && getTLI()->isExtFree(I))

1242 return 0;

1243 break;

1244 case Instruction::ZExt:

1245 if (TLI->isZExtFree(SrcLT.second, DstLT.second))

1246 return 0;

1247 [[fallthrough]];

1248 case Instruction::SExt:

1249 if (I && getTLI()->isExtFree(I))

1250 return 0;

1251

1252

1253

1257 unsigned LType =

1259 if (DstLT.first == SrcLT.first &&

1261 return 0;

1262 }

1263 break;

1264 case Instruction::AddrSpaceCast:

1266 Dst->getPointerAddressSpace()))

1267 return 0;

1268 break;

1269 }

1270

1273

1274

1275 if (SrcLT.first == DstLT.first &&

1277 return SrcLT.first;

1278

1279

1280 if (!SrcVTy && !DstVTy) {

1281

1282

1284 return 1;

1285

1286

1287 return 4;

1288 }

1289

1290

1291 if (DstVTy && SrcVTy) {

1292

1293 if (SrcLT.first == DstLT.first && SrcSize == DstSize) {

1294

1295

1296 if (Opcode == Instruction::ZExt)

1297 return SrcLT.first;

1298

1299

1300 if (Opcode == Instruction::SExt)

1301 return SrcLT.first * 2;

1302

1303

1304

1305

1307 return SrcLT.first * 1;

1308 }

1309

1310

1311

1312

1313
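// If either legalized vector type must be split, cost the cast as two casts
// on half-width vectors, plus a one-off split cost when only one side
// actually splits.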

1314 bool SplitSrc =

1317 bool SplitDst =

1320 if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isKnownEven() &&

1321 DstVTy->getElementCount().isKnownEven()) {

1324 const T *TTI = thisT();

1325

1327 (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;

1328 return SplitCost +

1329 (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,

1331 }

1332

1333

1336

1337

1338

1341 Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);

1342

1343

1344

1348 }

1349

1350

1351

1352

1353

1354 if (Opcode == Instruction::BitCast) {

1355

1358 : 0) +

1360 false, CostKind)

1361 : 0);

1362 }

1363

1365 }

1366

1369 unsigned Index,

1371 return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,

1372 CostKind, Index, nullptr, nullptr) +

1373 thisT()->getCastInstrCost(Opcode, Dst, VecTy->getElementType(),

1375 }

1376

1379 const Instruction *I = nullptr) const override {

1381 }

1382

1388 const Instruction *I = nullptr) const override {

1389 const TargetLoweringBase *TLI = getTLI();

1390 int ISD = TLI->InstructionOpcodeToISD(Opcode);

1391 assert(ISD && "Invalid opcode");

1392

1393 if (getTLI()->getValueType(DL, ValTy, true) == MVT::Other)

1395 Op1Info, Op2Info, I);

1396

1397

1399 assert(CondTy && "CondTy must exist");

1402 }

1404

1405 if (!(ValTy->isVectorTy() && LT.second.isVector()) &&

1406 !TLI->isOperationExpand(ISD, LT.second)) {

1407

1408

1409 return LT.first * 1;

1410 }

1411

1412

1413

1414

1418

1421 Opcode, ValVTy->getScalarType(), CondTy->getScalarType(), VecPred,

1423

1424

1425

1427 false, CostKind) +

1429 }

1430

1431

1432 return 1;

1433 }

1434

1437 unsigned Index, const Value *Op0,

1438 const Value *Op1) const override {

1440 }

1441

1442

1443

1444

1445

1448 unsigned Index, Value *Scalar,

1449 ArrayRef<std::tuple<Value *, User *, int>>

1450 ScalarUserAndIdx) const override {

1451 return thisT()->getVectorInstrCost(Opcode, Val, CostKind, Index, nullptr,

1452 nullptr);

1453 }

1454

1457 unsigned Index) const override {

1458 Value *Op0 = nullptr;

1459 Value *Op1 = nullptr;

1461 Op0 = IE->getOperand(0);

1462 Op1 = IE->getOperand(1);

1463 }

1464 return thisT()->getVectorInstrCost(I.getOpcode(), Val, CostKind, Index, Op0,

1465 Op1);

1466 }

1467

1471 unsigned Index) const override {

1472 unsigned NewIndex = -1;

1475 "Unexpected index from end of vector");

1476 NewIndex = FVTy->getNumElements() - 1 - Index;

1477 }

1478 return thisT()->getVectorInstrCost(Opcode, Val, CostKind, NewIndex, nullptr,

1479 nullptr);

1480 }

1481

1484 const APInt &DemandedDstElts,

1486 assert(DemandedDstElts.getBitWidth() == (unsigned)VF * ReplicationFactor &&

1487 "Unexpected size of DemandedDstElts.");

1488

1490

1493

1494

1495

1496

1497

1498

1499

1500

1501

1502

1503

1505 Cost += thisT()->getScalarizationOverhead(SrcVT, DemandedSrcElts,

1506 false,

1507 true, CostKind);

1508 Cost += thisT()->getScalarizationOverhead(ReplicatedVT, DemandedDstElts,

1509 true,

1510 false, CostKind);

1511

1512 return Cost;

1513 }

1514

1519 const Instruction *I = nullptr) const override {

1520 assert(!Src->isVoidTy() && "Invalid type");

1521

1522 if (getTLI()->getValueType(DL, Src, true) == MVT::Other)

1523 return 4;

1525

1526

1529 return Cost;

1530

1533

1534

1535

1537 LT.second.getSizeInBits())) {

1538

1539

1540

1543 if (Opcode == Instruction::Store)

1545 else

1547

1549

1550

1553 Opcode == Instruction::Store, CostKind);

1554 }

1555 }

1556

1557 return Cost;

1558 }

1559
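// Interleaved access groups: start from the cost of one wide (possibly
// masked) memory op over the whole vector, then add shuffle overhead modeled
// as insert/extract scalarization of the member sub-vectors that Indices
// actually demands, and finally the mask replication and AND for masked or
// gapped groups.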

1563 bool UseMaskForCond = false, bool UseMaskForGaps = false) const override {

1564

1565

1568

1570

1571 unsigned NumElts = VT->getNumElements();

1572 assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");

1573

1574 unsigned NumSubElts = NumElts / Factor;

1576

1577

1579 if (UseMaskForCond || UseMaskForGaps) {

1580 unsigned IID = Opcode == Instruction::Load ? Intrinsic::masked_load

1581 : Intrinsic::masked_store;

1582 Cost = thisT()->getMemIntrinsicInstrCost(

1585 } else

1586 Cost = thisT()->getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace,

1588

1589

1590

1592 unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);

1593 unsigned VecTyLTSize = VecTyLT.getStoreSize();

1594

1595

1596

1597

1598

1599

1600

1601

1602

1603

1604

1605

1606

1607

1608

1609 if (Cost.isValid() && VecTySize > VecTyLTSize) {

1610

1611

1612 unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);

1613

1614

1615

1616 unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);

1617

1618

1619 BitVector UsedInsts(NumLegalInsts, false);

1620 for (unsigned Index : Indices)

1621 for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)

1622 UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);

1623

1624

1625

1627 }

1628

1629

1631 "Interleaved memory op has too many members");

1632

1635

1637 for (unsigned Index : Indices) {

1638 assert(Index < Factor && "Invalid index for interleaved memory op");

1639 for (unsigned Elm = 0; Elm < NumSubElts; Elm++)

1640 DemandedLoadStoreElts.setBit(Index + Elm * Factor);

1641 }

1642

1643 if (Opcode == Instruction::Load) {

1644

1645

1646

1647

1648

1649

1650

1651

1652 InstructionCost InsSubCost = thisT()->getScalarizationOverhead(

1653 SubVT, DemandedAllSubElts,

1654 true, false, CostKind);

1655 Cost += Indices.size() * InsSubCost;

1656 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,

1657 false,

1658 true, CostKind);

1659 } else {

1660

1661

1662

1663

1664

1665

1666

1667

1668

1669

1670

1671

1672

1673 InstructionCost ExtSubCost = thisT()->getScalarizationOverhead(

1674 SubVT, DemandedAllSubElts,

1675 false, true, CostKind);

1676 Cost += ExtSubCost * Indices.size();

1677 Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,

1678 true,

1679 false, CostKind);

1680 }

1681

1682 if (!UseMaskForCond)

1683 return Cost;

1684

1686

1687 Cost += thisT()->getReplicationShuffleCost(

1688 I8Type, Factor, NumSubElts,

1689 UseMaskForGaps ? DemandedLoadStoreElts : DemandedAllResultElts,

1691

1692

1693

1694

1695

1696

1697 if (UseMaskForGaps) {

1699 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,

1701 }

1702

1703 return Cost;

1704 }

1705

1706

1710

1712 return 0;

1713

1714

1718

1719

1720

1721

1722

1723

1725 std::optional&lt;unsigned&gt; FOp =

1727 if (FOp) {

1728 if (ICA.getID() == Intrinsic::vp_load) {

1729 Align Alignment;

1731 Alignment = VPI->getPointerAlignment().valueOrOne();

1732 unsigned AS = 0;

1735 AS = PtrTy->getAddressSpace();

1736 return thisT()->getMemoryOpCost(*FOp, ICA.getReturnType(), Alignment,

1738 }

1739 if (ICA.getID() == Intrinsic::vp_store) {

1740 Align Alignment;

1742 Alignment = VPI->getPointerAlignment().valueOrOne();

1743 unsigned AS = 0;

1746 AS = PtrTy->getAddressSpace();

1747 return thisT()->getMemoryOpCost(*FOp, ICA.getArgTypes()[0], Alignment,

1749 }

1751 ICA.getID() == Intrinsic::vp_fneg) {

1752 return thisT()->getArithmeticInstrCost(*FOp, ICA.getReturnType(),

1754 }

1756 return thisT()->getCastInstrCost(

1759 }

1761

1765 return thisT()->getCmpSelInstrCost(*FOp, ICA.getArgTypes()[0],

1767 UI->getPredicate(), CostKind);

1768 }

1769 }

1770 }

1771 if (ICA.getID() == Intrinsic::vp_load_ff) {

1774 Align Alignment;

1776 Alignment = VPI->getPointerAlignment().valueOrOne();

1777 return thisT()->getMemIntrinsicInstrCost(

1780 }

1781 if (ICA.getID() == Intrinsic::vp_scatter) {

1788 }

1789 Align Alignment;

1791 Alignment = VPI->getPointerAlignment().valueOrOne();

1793 return thisT()->getMemIntrinsicInstrCost(

1796 VarMask, Alignment, nullptr),

1798 }

1799 if (ICA.getID() == Intrinsic::vp_gather) {

1806 }

1807 Align Alignment;

1809 Alignment = VPI->getPointerAlignment().valueOrOne();

1811 return thisT()->getMemIntrinsicInstrCost(

1814 VarMask, Alignment, nullptr),

1816 }

1817

1818 if (ICA.getID() == Intrinsic::vp_select ||

1819 ICA.getID() == Intrinsic::vp_merge) {

1824 }

1828 }

1829

1830 std::optional&lt;Intrinsic::ID&gt; FID =

1832

1833

1834 if (ICA.getID() == Intrinsic::experimental_vp_reverse)

1835 FID = Intrinsic::vector_reverse;

1836

1837 if (FID) {

1838

1839

1841 "Expected VPIntrinsic to have Mask and Vector Length args and "

1842 "types");

1843

1848

1849

1850

1851

1853 *FID != Intrinsic::vector_reduce_fadd &&

1854 *FID != Intrinsic::vector_reduce_fmul) {

1858 }

1859

1862 return thisT()->getIntrinsicInstrCost(NewICA, CostKind);

1863 }

1864 }

1865

1868

1870

1873

1877 switch (IID) {

1878 default:

1879 break;

1880

1881 case Intrinsic::powi:

1883 bool ShouldOptForSize = I->getParent()->getParent()->hasOptSize();

1884 if (getTLI()->isBeneficialToExpandPowI(RHSC->getSExtValue(),

1885 ShouldOptForSize)) {

1886

1887

1889 unsigned ActiveBits = Exponent.getActiveBits();

1890 unsigned PopCount = Exponent.popcount();

1892 thisT()->getArithmeticInstrCost(

1893 Instruction::FMul, RetTy, CostKind);

1894 if (RHSC->isNegative())

1895 Cost += thisT()->getArithmeticInstrCost(Instruction::FDiv, RetTy,

1897 return Cost;

1898 }

1899 }

1900 break;

1901 case Intrinsic::cttz:

1902

1903 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCttz(RetTy))

1905 break;

1906

1907 case Intrinsic::ctlz:

1908

1909 if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCtlz(RetTy))

1911 break;

1912

1913 case Intrinsic::memcpy:

1914 return thisT()->getMemcpyCost(ICA.getInst());

1915

1916 case Intrinsic::masked_scatter: {

1917 const Value *Mask = Args[2];

1919 Align Alignment = I->getParamAlign(1).valueOrOne();

1920 return thisT()->getMemIntrinsicInstrCost(

1923 Alignment, I),

1925 }

1926 case Intrinsic::masked_gather: {

1927 const Value *Mask = Args[1];

1929 Align Alignment = I->getParamAlign(0).valueOrOne();

1930 return thisT()->getMemIntrinsicInstrCost(

1932 VarMask, Alignment, I),

1934 }

1935 case Intrinsic::masked_compressstore: {

1937 const Value *Mask = Args[2];

1938 Align Alignment = I->getParamAlign(1).valueOrOne();

1939 return thisT()->getMemIntrinsicInstrCost(

1941 Alignment, I),

1943 }

1944 case Intrinsic::masked_expandload: {

1945 const Value *Mask = Args[1];

1946 Align Alignment = I->getParamAlign(0).valueOrOne();

1947 return thisT()->getMemIntrinsicInstrCost(

1949 Alignment, I),

1951 }

1952 case Intrinsic::experimental_vp_strided_store: {

1954 const Value *Ptr = Args[1];

1955 const Value *Mask = Args[3];

1956 const Value *EVL = Args[4];

1959 Align Alignment =

1960 I->getParamAlign(1).value_or(thisT()->DL.getABITypeAlign(EltTy));

1961 return thisT()->getMemIntrinsicInstrCost(

1963 Alignment, I),

1965 }

1966 case Intrinsic::experimental_vp_strided_load: {

1967 const Value *Ptr = Args[0];

1968 const Value *Mask = Args[2];

1969 const Value *EVL = Args[3];

1972 Align Alignment =

1973 I->getParamAlign(0).value_or(thisT()->DL.getABITypeAlign(EltTy));

1974 return thisT()->getMemIntrinsicInstrCost(

1977 }

1978 case Intrinsic::stepvector: {

1981

1983 }

1984 case Intrinsic::vector_extract: {

1985

1986

1994 }

1995 case Intrinsic::vector_insert: {

1996

1997

2001 return thisT()->getShuffleCost(

2005 }

2006 case Intrinsic::vector_splice: {

2011 }

2012 case Intrinsic::vector_reduce_add:

2013 case Intrinsic::vector_reduce_mul:

2014 case Intrinsic::vector_reduce_and:

2015 case Intrinsic::vector_reduce_or:

2016 case Intrinsic::vector_reduce_xor:

2017 case Intrinsic::vector_reduce_smax:

2018 case Intrinsic::vector_reduce_smin:

2019 case Intrinsic::vector_reduce_fmax:

2020 case Intrinsic::vector_reduce_fmin:

2021 case Intrinsic::vector_reduce_fmaximum:

2022 case Intrinsic::vector_reduce_fminimum:

2023 case Intrinsic::vector_reduce_umax:

2024 case Intrinsic::vector_reduce_umin: {

2027 }

2028 case Intrinsic::vector_reduce_fadd:

2029 case Intrinsic::vector_reduce_fmul: {

2031 IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, I, 1);

2033 }
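// Funnel shift expansion: shl + lshr + or for the double shift, a sub and a
// urem/srem to normalize the shift amount, and, when the two inputs differ,
// an icmp + select to handle a shift amount of zero.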

2034 case Intrinsic::fshl:

2035 case Intrinsic::fshr: {

2036 const Value *X = Args[0];

2037 const Value *Y = Args[1];

2038 const Value *Z = Args[2];

2042

2043

2044

2047 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);

2049 thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);

2050 Cost += thisT()->getArithmeticInstrCost(

2051 BinaryOperator::Shl, RetTy, CostKind, OpInfoX,

2053 Cost += thisT()->getArithmeticInstrCost(

2054 BinaryOperator::LShr, RetTy, CostKind, OpInfoY,

2056

2057

2058

2060 Cost += thisT()->getArithmeticInstrCost(

2062 : BinaryOperator::URem,

2064 {TTI::OK_UniformConstantValue, TTI::OP_None});

2065

2066 if (X != Y) {

2069 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,

2072 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,

2074 }

2075 return Cost;

2076 }

2077 case Intrinsic::experimental_cttz_elts: {

2078 EVT ArgType = getTLI()->getValueType(DL, ICA.getArgTypes()[0], true);

2079

2080

2081

2082 if (!getTLI()->shouldExpandCttzElements(ArgType))

2084

2085

2086

2087

2088

2089

2094

2095 unsigned EltWidth = getTLI()->getBitWidthForCttzElements(

2098

2099

2102

2104 FMF);

2106 thisT()->getIntrinsicInstrCost(StepVecAttrs, CostKind);

2107

2109 thisT()->getArithmeticInstrCost(Instruction::Sub, NewVecTy, CostKind);

2110 Cost += thisT()->getCastInstrCost(Instruction::SExt, NewVecTy,

2114 thisT()->getArithmeticInstrCost(Instruction::And, NewVecTy, CostKind);

2115

2117 NewEltTy, NewVecTy, FMF, I, 1);

2118 Cost += thisT()->getTypeBasedIntrinsicInstrCost(ReducAttrs, CostKind);

2120 thisT()->getArithmeticInstrCost(Instruction::Sub, NewEltTy, CostKind);

2121

2122 return Cost;

2123 }

2124 case Intrinsic::get_active_lane_mask:

2125 case Intrinsic::experimental_vector_match:

2126 case Intrinsic::experimental_vector_histogram_add:

2127 case Intrinsic::experimental_vector_histogram_uadd_sat:

2128 case Intrinsic::experimental_vector_histogram_umax:

2129 case Intrinsic::experimental_vector_histogram_umin:

2130 return thisT()->getTypeBasedIntrinsicInstrCost(ICA, CostKind);

2131 case Intrinsic::modf:

2132 case Intrinsic::sincos:

2133 case Intrinsic::sincospi: {

2134 std::optional&lt;unsigned&gt; CallRetElementIndex;

2135

2136

2137 if (ICA.getID() == Intrinsic::modf)

2138 CallRetElementIndex = 0;

2139

2140 if (auto Cost = getMultipleResultIntrinsicVectorLibCallCost(

2141 ICA, CostKind, CallRetElementIndex))

2142 return *Cost;

2143

2144 break;

2145 }

2146 }

2147

2148

2149

2150

2153 ScalarizationCost = 0;

2158 true, false, CostKind);

2159 }

2160 }

2162 filterConstantAndDuplicatedOperands(Args, ICA.getArgTypes()),

2164 }

2165

2167 ScalarizationCost);

2168 return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);

2169 }

2170

2171

2172

2173

2174

2184

2186 if (!Tys.empty()) {

2187

2188

2189 unsigned VecTyIndex = 0;

2190 if (IID == Intrinsic::vector_reduce_fadd ||

2191 IID == Intrinsic::vector_reduce_fmul)

2192 VecTyIndex = 1;

2193 assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");

2195 }

2196

2197

2199 unsigned ISD = 0;

2200 switch (IID) {

2201 default: {

2202

2205 }))

2207

2208

2210 SkipScalarizationCost ? ScalarizationCostPassed : 0;

2211 unsigned ScalarCalls = 1;

2212 Type *ScalarRetTy = RetTy;

2214 if (!SkipScalarizationCost)

2216 RetVTy, true, false, CostKind);

2217 ScalarCalls = std::max(ScalarCalls,

2220 }

2222 for (Type *Ty : Tys) {

2224 if (!SkipScalarizationCost)

2226 VTy, false, true, CostKind);

2227 ScalarCalls = std::max(ScalarCalls,

2229 Ty = Ty->getScalarType();

2230 }

2232 }

2233 if (ScalarCalls == 1)

2234 return 1;

2235

2238 thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);

2239

2240 return ScalarCalls * ScalarCost + ScalarizationCost;

2241 }

2242

2243

2244 case Intrinsic::sqrt:

2245 ISD = ISD::FSQRT;

2246 break;

2247 case Intrinsic::sin:

2248 ISD = ISD::FSIN;

2249 break;

2250 case Intrinsic::cos:

2251 ISD = ISD::FCOS;

2252 break;

2253 case Intrinsic::sincos:

2254 ISD = ISD::FSINCOS;

2255 break;

2256 case Intrinsic::sincospi:

2257 ISD = ISD::FSINCOSPI;

2258 break;

2259 case Intrinsic::modf:

2260 ISD = ISD::FMODF;

2261 break;

2262 case Intrinsic::tan:

2263 ISD = ISD::FTAN;

2264 break;

2265 case Intrinsic::asin:

2266 ISD = ISD::FASIN;

2267 break;

2268 case Intrinsic::acos:

2269 ISD = ISD::FACOS;

2270 break;

2271 case Intrinsic::atan:

2272 ISD = ISD::FATAN;

2273 break;

2274 case Intrinsic::atan2:

2275 ISD = ISD::FATAN2;

2276 break;

2277 case Intrinsic::sinh:

2278 ISD = ISD::FSINH;

2279 break;

2280 case Intrinsic::cosh:

2281 ISD = ISD::FCOSH;

2282 break;

2283 case Intrinsic::tanh:

2284 ISD = ISD::FTANH;

2285 break;

2286 case Intrinsic::exp:

2287 ISD = ISD::FEXP;

2288 break;

2289 case Intrinsic::exp2:

2290 ISD = ISD::FEXP2;

2291 break;

2292 case Intrinsic::exp10:

2293 ISD = ISD::FEXP10;

2294 break;

2295 case Intrinsic::log:

2296 ISD = ISD::FLOG;

2297 break;

2298 case Intrinsic::log10:

2299 ISD = ISD::FLOG10;

2300 break;

2301 case Intrinsic::log2:

2302 ISD = ISD::FLOG2;

2303 break;

2304 case Intrinsic::ldexp:

2305 ISD = ISD::FLDEXP;

2306 break;

2307 case Intrinsic::fabs:

2308 ISD = ISD::FABS;

2309 break;

2310 case Intrinsic::canonicalize:

2312 break;

2313 case Intrinsic::minnum:

2314 ISD = ISD::FMINNUM;

2315 break;

2316 case Intrinsic::maxnum:

2317 ISD = ISD::FMAXNUM;

2318 break;

2319 case Intrinsic::minimum:

2320 ISD = ISD::FMINIMUM;

2321 break;

2322 case Intrinsic::maximum:

2323 ISD = ISD::FMAXIMUM;

2324 break;

2325 case Intrinsic::minimumnum:

2326 ISD = ISD::FMINIMUMNUM;

2327 break;

2328 case Intrinsic::maximumnum:

2329 ISD = ISD::FMAXIMUMNUM;

2330 break;

2331 case Intrinsic::copysign:

2333 break;

2334 case Intrinsic::floor:

2335 ISD = ISD::FFLOOR;

2336 break;

2337 case Intrinsic::ceil:

2338 ISD = ISD::FCEIL;

2339 break;

2340 case Intrinsic::trunc:

2341 ISD = ISD::FTRUNC;

2342 break;

2343 case Intrinsic::nearbyint:

2344 ISD = ISD::FNEARBYINT;

2345 break;

2346 case Intrinsic::rint:

2347 ISD = ISD::FRINT;

2348 break;

2349 case Intrinsic::lrint:

2350 ISD = ISD::LRINT;

2351 break;

2352 case Intrinsic::llrint:

2353 ISD = ISD::LLRINT;

2354 break;

2355 case Intrinsic::round:

2356 ISD = ISD::FROUND;

2357 break;

2358 case Intrinsic::roundeven:

2359 ISD = ISD::FROUNDEVEN;

2360 break;

2361 case Intrinsic::lround:

2362 ISD = ISD::LROUND;

2363 break;

2364 case Intrinsic::llround:

2365 ISD = ISD::LLROUND;

2366 break;

2367 case Intrinsic::pow:

2368 ISD = ISD::FPOW;

2369 break;

2370 case Intrinsic::fma:

2372 break;

2373 case Intrinsic::fmuladd:

2375 break;

2376 case Intrinsic::experimental_constrained_fmuladd:

2378 break;

2379

2380 case Intrinsic::lifetime_start:

2381 case Intrinsic::lifetime_end:

2382 case Intrinsic::sideeffect:

2383 case Intrinsic::pseudoprobe:

2384 case Intrinsic::arithmetic_fence:

2385 return 0;

2386 case Intrinsic::masked_store: {

2387 Type *Ty = Tys[0];

2388 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);

2389 return thisT()->getMemIntrinsicInstrCost(

2391 }

2392 case Intrinsic::masked_load: {

2393 Type *Ty = RetTy;

2394 Align TyAlign = thisT()->DL.getABITypeAlign(Ty);

2395 return thisT()->getMemIntrinsicInstrCost(

2397 }

2398 case Intrinsic::experimental_vp_strided_store: {

2400 Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());

2401 return thisT()->getMemIntrinsicInstrCost(

2403 true, Alignment,

2406 }

2407 case Intrinsic::experimental_vp_strided_load: {

2409 Align Alignment = thisT()->DL.getABITypeAlign(Ty->getElementType());

2410 return thisT()->getMemIntrinsicInstrCost(

2412 true, Alignment,

2415 }

2416 case Intrinsic::vector_reduce_add:

2417 case Intrinsic::vector_reduce_mul:

2418 case Intrinsic::vector_reduce_and:

2419 case Intrinsic::vector_reduce_or:

2420 case Intrinsic::vector_reduce_xor:

2421 return thisT()->getArithmeticReductionCost(

2424 case Intrinsic::vector_reduce_fadd:

2425 case Intrinsic::vector_reduce_fmul:

2426 return thisT()->getArithmeticReductionCost(

2428 case Intrinsic::vector_reduce_smax:

2429 case Intrinsic::vector_reduce_smin:

2430 case Intrinsic::vector_reduce_umax:

2431 case Intrinsic::vector_reduce_umin:

2432 case Intrinsic::vector_reduce_fmax:

2433 case Intrinsic::vector_reduce_fmin:

2434 case Intrinsic::vector_reduce_fmaximum:

2435 case Intrinsic::vector_reduce_fminimum:

2438 case Intrinsic::experimental_vector_match: {

2441 unsigned SearchSize = NeedleTy->getNumElements();

2442

2443

2444

2445 EVT SearchVT = getTLI()->getValueType(DL, SearchTy);

2446 if (!getTLI()->shouldExpandVectorMatch(SearchVT, SearchSize))

2448

2449

2450

2452 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, NeedleTy,

2453 CostKind, 1, nullptr, nullptr);

2454 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SearchTy,

2455 CostKind, 0, nullptr, nullptr);

2458 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SearchTy, RetTy,

2461 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);

2462 Cost *= SearchSize;

2464 thisT()->getArithmeticInstrCost(BinaryOperator::And, RetTy, CostKind);

2465 return Cost;

2466 }

2467 case Intrinsic::vector_reverse:

2471 case Intrinsic::experimental_vector_histogram_add:

2472 case Intrinsic::experimental_vector_histogram_uadd_sat:

2473 case Intrinsic::experimental_vector_histogram_umax:

2474 case Intrinsic::experimental_vector_histogram_umin: {

2477

2478

2479 if (!PtrsTy)

2481

2482 Align Alignment = thisT()->DL.getABITypeAlign(EltTy);

2484 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, PtrsTy,

2485 CostKind, 1, nullptr, nullptr);

2486 Cost += thisT()->getMemoryOpCost(Instruction::Load, EltTy, Alignment, 0,

2488 switch (IID) {

2489 default:

2491 case Intrinsic::experimental_vector_histogram_add:

2493 thisT()->getArithmeticInstrCost(Instruction::Add, EltTy, CostKind);

2494 break;

2495 case Intrinsic::experimental_vector_histogram_uadd_sat: {

2497 Cost += thisT()->getIntrinsicInstrCost(UAddSat, CostKind);

2498 break;

2499 }

2500 case Intrinsic::experimental_vector_histogram_umax: {

2503 break;

2504 }

2505 case Intrinsic::experimental_vector_histogram_umin: {

2508 break;

2509 }

2510 }

2511 Cost += thisT()->getMemoryOpCost(Instruction::Store, EltTy, Alignment, 0,

2514 return Cost;

2515 }

2516 case Intrinsic::get_active_lane_mask: {

2518 EVT ResVT = getTLI()->getValueType(DL, RetTy, true);

2519 EVT ArgVT = getTLI()->getValueType(DL, ArgTy, true);

2520

2521

2522

2523 if (!getTLI()->shouldExpandGetActiveLaneMask(ResVT, ArgVT))

2525

2526

2527

2528 Type *ExpRetTy =

2532 thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);

2533 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, ExpRetTy, RetTy,

2535 return Cost;

2536 }

2537 case Intrinsic::experimental_memset_pattern:

2538

2539

2540

2542 case Intrinsic::abs:

2544 break;

2545 case Intrinsic::fshl:

2547 break;

2548 case Intrinsic::fshr:

2550 break;

2551 case Intrinsic::smax:

2553 break;

2554 case Intrinsic::smin:

2556 break;

2557 case Intrinsic::umax:

2559 break;

2560 case Intrinsic::umin:

2562 break;

2563 case Intrinsic::sadd_sat:

2565 break;

2566 case Intrinsic::ssub_sat:

2568 break;

2569 case Intrinsic::uadd_sat:

2571 break;

2572 case Intrinsic::usub_sat:

2574 break;

2575 case Intrinsic::smul_fix:

2577 break;

2578 case Intrinsic::umul_fix:

2580 break;

2581 case Intrinsic::sadd_with_overflow:

2583 break;

2584 case Intrinsic::ssub_with_overflow:

2586 break;

2587 case Intrinsic::uadd_with_overflow:

2589 break;

2590 case Intrinsic::usub_with_overflow:

2592 break;

2593 case Intrinsic::smul_with_overflow:

2595 break;

2596 case Intrinsic::umul_with_overflow:

2598 break;

2599 case Intrinsic::fptosi_sat:

2600 case Intrinsic::fptoui_sat: {

2603

2604

2605

2606 if (!SrcLT.first.isValid() || !RetLT.first.isValid())

2610 break;

2611 }

2612 case Intrinsic::ctpop:

2614

2615

2617 break;

2618 case Intrinsic::ctlz:

2620 break;

2621 case Intrinsic::cttz:

2623 break;

2624 case Intrinsic::bswap:

2626 break;

2627 case Intrinsic::bitreverse:

2629 break;

2630 case Intrinsic::ucmp:

2632 break;

2633 case Intrinsic::scmp:

2635 break;

2636 }

2637

2639 Type *LegalizeTy = ST ? ST->getContainedType(0) : RetTy;

2641

2643

2645 if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&

2647 return 0;

2648 }

2649

2650

2651

2652

2653

2654 if (LT.first > 1)

2655 return (LT.first * 2);

2656 else

2657 return (LT.first * 1);

2659

2660

2661 return (LT.first * 2);

2662 }

2663

2664 switch (IID) {

2665 case Intrinsic::fmuladd: {

2666

2667

2668

2669 return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,

2671 thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,

2673 }

2674 case Intrinsic::experimental_constrained_fmuladd: {

2676 Intrinsic::experimental_constrained_fmul, RetTy, Tys);

2678 Intrinsic::experimental_constrained_fadd, RetTy, Tys);

2679 return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +

2680 thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);

2681 }

2682 case Intrinsic::smin:

2683 case Intrinsic::smax:

2684 case Intrinsic::umin:

2685 case Intrinsic::umax: {

2686

2688 bool IsUnsigned = IID == Intrinsic::umax || IID == Intrinsic::umin;

2692 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,

2694 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,

2696 return Cost;

2697 }

2698 case Intrinsic::sadd_with_overflow:

2699 case Intrinsic::ssub_with_overflow: {

2702 unsigned Opcode = IID == Intrinsic::sadd_with_overflow

2703 ? BinaryOperator::Add

2704 : BinaryOperator::Sub;

2705

2706

2707

2708

2709

2711 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);

2713 2 * thisT()->getCmpSelInstrCost(Instruction::ICmp, SumTy, OverflowTy,

2715 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Xor, OverflowTy,

2717 return Cost;

2718 }

2719 case Intrinsic::uadd_with_overflow:

2720 case Intrinsic::usub_with_overflow: {

2723 unsigned Opcode = IID == Intrinsic::uadd_with_overflow

2724 ? BinaryOperator::Add

2725 : BinaryOperator::Sub;

2729

2731 Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);

2732 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy,

2733 OverflowTy, Pred, CostKind);

2734 return Cost;

2735 }

2736 case Intrinsic::smul_with_overflow:

2737 case Intrinsic::umul_with_overflow: {

2742 bool IsSigned = IID == Intrinsic::smul_with_overflow;

2743

2744 unsigned ExtOp = IsSigned ? Instruction::SExt : Instruction::ZExt;

2746

2748 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);

2750 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);

2751 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,

2753 Cost += thisT()->getArithmeticInstrCost(

2756

2757 if (IsSigned)

2758 Cost += thisT()->getArithmeticInstrCost(

2759 Instruction::AShr, MulTy, CostKind,

2762

2763 Cost += thisT()->getCmpSelInstrCost(

2765 return Cost;

2766 }

2767 case Intrinsic::sadd_sat:

2768 case Intrinsic::ssub_sat: {

2769

2771

2773 Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat

2774 ? Intrinsic::sadd_with_overflow

2775 : Intrinsic::ssub_with_overflow;

2777

2778

2779

2782 nullptr, ScalarizationCostPassed);

2783 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);

2784 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,

2786 Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,

2788 return Cost;

2789 }

2790 case Intrinsic::uadd_sat:

2791 case Intrinsic::usub_sat: {

2793

2795 Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat

2796 ? Intrinsic::uadd_with_overflow

2797 : Intrinsic::usub_with_overflow;

2798

2801 nullptr, ScalarizationCostPassed);

2802 Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);

2804 thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,

2806 return Cost;

2807 }

2808 case Intrinsic::smul_fix:

2809 case Intrinsic::umul_fix: {

2812

2813 unsigned ExtOp =

2814 IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;

2816

2818 Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);

2820 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);

2821 Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,

2823 Cost += thisT()->getArithmeticInstrCost(

2826 Cost += thisT()->getArithmeticInstrCost(

2829 Cost += thisT()->getArithmeticInstrCost(Instruction::Or, RetTy, CostKind);

2830 return Cost;

2831 }

2832 case Intrinsic::abs: {

2833

2837 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,

2839 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,

2841

2842 Cost += thisT()->getArithmeticInstrCost(

2843 BinaryOperator::Sub, RetTy, CostKind,

2845 return Cost;

2846 }

2847 case Intrinsic::fshl:

2848 case Intrinsic::fshr: {

2849

2850

2854 thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);

2856 thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);

2858 thisT()->getArithmeticInstrCost(BinaryOperator::Shl, RetTy, CostKind);

2859 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::LShr, RetTy,

2861

2862

2863

2864 Cost += thisT()->getArithmeticInstrCost(

2866 : BinaryOperator::URem,

2867 RetTy, CostKind, {TTI::OK_AnyValue, TTI::OP_None},

2868 {TTI::OK_UniformConstantValue, TTI::OP_None});

2869

2870 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,

2872 Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,

2874 return Cost;

2875 }

2876 case Intrinsic::fptosi_sat:

2877 case Intrinsic::fptoui_sat: {

2878 if (Tys.empty())

2879 break;

2880 Type *FromTy = Tys[0];

2881 bool IsSigned = IID == Intrinsic::fptosi_sat;

2882

2885 {FromTy, FromTy});

2886 Cost += thisT()->getIntrinsicInstrCost(Attrs1, CostKind);

2888 {FromTy, FromTy});

2889 Cost += thisT()->getIntrinsicInstrCost(Attrs2, CostKind);

2890 Cost += thisT()->getCastInstrCost(

2891 IsSigned ? Instruction::FPToSI : Instruction::FPToUI, RetTy, FromTy,

2893 if (IsSigned) {

2895 Cost += thisT()->getCmpSelInstrCost(

2897 Cost += thisT()->getCmpSelInstrCost(

2899 }

2900 return Cost;

2901 }

2902 case Intrinsic::ucmp:

2903 case Intrinsic::scmp: {

2904 Type *CmpTy = Tys[0];

2907 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,

2910 thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, CmpTy, CondTy,

2913

2916

2917 Cost += 2 * thisT()->getCmpSelInstrCost(

2918 BinaryOperator::Select, RetTy, CondTy,

2920 } else {

2921

2923 2 * thisT()->getCastInstrCost(CastInst::ZExt, RetTy, CondTy,

2925 Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy,

2927 }

2928 return Cost;

2929 }

2930 case Intrinsic::maximumnum:

2931 case Intrinsic::minimumnum: {

2932

2933

2934

2935

2936

2937

2938

2939

2940 int IeeeISD =

2941 IID == Intrinsic::maximumnum ? ISD::FMAXNUM_IEEE : ISD::FMINNUM_IEEE;

2944 RetTy, Tys[0]);

2946 thisT()->getIntrinsicInstrCost(FCanonicalizeAttrs, CostKind);

2947 return LT.first + FCanonicalizeCost * 2;

2948 }

2949 break;

2950 }

2951 default:

2952 break;

2953 }

2954

2955

2956

2957

2960

2961

2965

2966 InstructionCost ScalarizationCost = ScalarizationCostPassed;

2967 if (!SkipScalarizationCost) {

2968 ScalarizationCost = 0;

2969 for (Type *RetVTy : RetVTys) {

2972 false, CostKind);

2973 }

2974 }

2975

2978 for (Type *Ty : Tys) {

2979 if (Ty->isVectorTy())

2980 Ty = Ty->getScalarType();

2982 }

2985 thisT()->getIntrinsicInstrCost(Attrs, CostKind);

2986 for (Type *Ty : Tys) {

2990 VTy, false, true, CostKind);

2991 ScalarCalls = std::max(ScalarCalls,

2993 }

2994 }

2995 return ScalarCalls * ScalarCost + ScalarizationCost;

2996 }

2997

2998

2999 return SingleCallCost;

3000 }

3001

3002

3006 unsigned Id = MICA.getID();

3010

3011 switch (Id) {

3012 case Intrinsic::experimental_vp_strided_load:

3013 case Intrinsic::experimental_vp_strided_store: {

3014 unsigned Opcode = Id == Intrinsic::experimental_vp_strided_load

3015 ? Instruction::Load

3016 : Instruction::Store;

3017

3018

3019

3020 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,

3021 VariableMask, true, CostKind);

3022 }

3023 case Intrinsic::masked_scatter:

3024 case Intrinsic::masked_gather:

3025 case Intrinsic::vp_scatter:

3026 case Intrinsic::vp_gather: {

3027 unsigned Opcode = (MICA.getID() == Intrinsic::masked_gather ||

3028 MICA.getID() == Intrinsic::vp_gather)

3029 ? Instruction::Load

3030 : Instruction::Store;

3031

3032 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,

3033 VariableMask, true, CostKind);

3034 }

3035 case Intrinsic::vp_load:

3036 case Intrinsic::vp_store:

3038 case Intrinsic::masked_load:

3039 case Intrinsic::masked_store: {

3040 unsigned Opcode =

3041 Id == Intrinsic::masked_load ? Instruction::Load : Instruction::Store;

3042

3043 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,

3045 }

3046 case Intrinsic::masked_compressstore:

3047 case Intrinsic::masked_expandload: {

3048 unsigned Opcode = MICA.getID() == Intrinsic::masked_expandload

3049 ? Instruction::Load

3050 : Instruction::Store;

3051

3052

3053 return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment,

3054 VariableMask,

3055 true, CostKind);

3056 }

3057 case Intrinsic::vp_load_ff:

3059 default:

3061 }

3062 }

3063

3064

3065

3066

3067

3068

3069

3070

3071

3072

3073

3074

3080

3083 if (!LT.first.isValid())

3084 return 0;

3085

3086

3088 Tp && LT.second.isFixedLengthVector() &&

3092 SubTp && SubTp->getElementType() == FTp->getElementType())

3093 return divideCeil(FTp->getNumElements(), SubTp->getNumElements());

3094 }

3095 return LT.first.getValue();

3096 }

3097

3103

3104

3105

3106

3107

3108

3109

3110

3111

3112

3113

3114

3115

3116

3117

3118

3119

3120

3121

3122

3123

3124

3127

3128

3131

3132 Type *ScalarTy = Ty->getElementType();

3134 if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&

3136 NumVecElts >= 2) {

3137

3138

3139

3140

3141

3142

3144 return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,

3146 thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,

3149 }
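// Tree (pairwise) reduction: Log2(NumVecElts) levels, each a shuffle that
// extracts the high half followed by a vector op on the low half; once the
// type is legal the remaining levels run within a single register, and lane 0
// is extracted at the end.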

3150 unsigned NumReduxLevels = Log2_32(NumVecElts);

3153 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);

3154 unsigned LongVectorCount = 0;

3155 unsigned MVTLen =

3156 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;

3157 while (NumVecElts > MVTLen) {

3158 NumVecElts /= 2;

3160 ShuffleCost += thisT()->getShuffleCost(

3162 ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);

3163 Ty = SubTy;

3164 ++LongVectorCount;

3165 }

3166

3167 NumReduxLevels -= LongVectorCount;

3168

3169

3170

3171

3172

3173

3174

3175 ShuffleCost +=

3178 ArithCost +=

3179 NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind);

3180 return ShuffleCost + ArithCost +

3181 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,

3182 CostKind, 0, nullptr, nullptr);

3183 }

3184

3185

3186

3187

3188

3189

3190

3191

3192

3193

3194

3195

3196

3197

3198

3199

3200

3203

3204

3207

3210 VTy, false, true, CostKind);

3211 InstructionCost ArithCost = thisT()->getArithmeticInstrCost(

3214

3215 return ExtractCost + ArithCost;

3216 }

3217

3220 std::optional&lt;FastMathFlags&gt; FMF,

3222 assert(Ty && "Unknown reduction vector type");

3226 }

3227

3228

3229

3233

3234

3237

3238 Type *ScalarTy = Ty->getElementType();

3240 unsigned NumReduxLevels = Log2_32(NumVecElts);

3243 std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);

3244 unsigned LongVectorCount = 0;

3245 unsigned MVTLen =

3246 LT.second.isVector() ? LT.second.getVectorNumElements() : 1;

3247 while (NumVecElts > MVTLen) {

3248 NumVecElts /= 2;

3250

3251 ShuffleCost += thisT()->getShuffleCost(

3253

3256 Ty = SubTy;

3257 ++LongVectorCount;

3258 }

3259

3260 NumReduxLevels -= LongVectorCount;

3261

3262

3263

3264

3265

3266 ShuffleCost +=

3271

3272

3273 return ShuffleCost + MinMaxCost +

3274 thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,

3275 CostKind, 0, nullptr, nullptr);

3276 }

3277

3280 VectorType *Ty, std::optional&lt;FastMathFlags&gt; FMF,

3283 FTy && IsUnsigned && Opcode == Instruction::Add &&

3285

3286

3287 auto *IntTy =

3291 return thisT()->getCastInstrCost(Instruction::BitCast, IntTy, FTy,

3293 thisT()->getIntrinsicInstrCost(ICA, CostKind);

3294 }

3295

3296

3299 thisT()->getArithmeticReductionCost(Opcode, ExtTy, FMF, CostKind);

3301 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,

3303

3304 return RedCost + ExtCost;

3305 }

3306

3311

3312

3313

3314 assert((RedOpcode == Instruction::Add || RedOpcode == Instruction::Sub) &&

3315 "The reduction opcode is expected to be Add or Sub.");

3317 InstructionCost RedCost = thisT()->getArithmeticReductionCost(

3318 RedOpcode, ExtTy, std::nullopt, CostKind);

3320 IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,

3322

3324 thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);

3325

3326 return RedCost + MulCost + 2 * ExtCost;

3327 }

3328

3330

3331

3332};

3333

3334

3335

3338

3340

3343

3346

3347public:

3349};

3350

3351} // namespace llvm

3352

3353#endif // LLVM_CODEGEN_BASICTTIIMPL_H

assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")

This file implements a class to represent arbitrary precision integral constant values and operations...

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

This file implements the BitVector class.

static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")

This file contains the declarations for the subclasses of Constant, which represent the different fla...

static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))

const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]

static const Function * getCalledFunction(const Value *V)

ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))

uint64_t IntrinsicInst * II

static unsigned getNumElements(Type *Ty)

static Type * getValueType(Value *V)

Returns the type of the given value/instruction V.

This file defines the SmallPtrSet class.

This file defines the SmallVector class.

static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")

static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")

static SymbolRef::Type getType(const Symbol *Sym)

This file describes how to lower LLVM code to machine code.

This file provides helpers for the implementation of a TargetTransformInfo-conforming class.

This pass exposes codegen information to IR-level passes.

Class for arbitrary precision integers.

static APInt getAllOnes(unsigned numBits)

Return an APInt of a specified width with all bits set.

void setBit(unsigned BitPosition)

Set the given bit to 1 whose position is given as "bitPosition".

bool sgt(const APInt &RHS) const

Signed greater than comparison.

unsigned getBitWidth() const

Return the number of bits in the APInt.

bool slt(const APInt &RHS) const

Signed less than comparison.

static APInt getZero(unsigned numBits)

Get the '0' value for the specified bit-width.

an instruction to allocate memory on the stack

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...

ArrayRef< T > drop_front(size_t N=1) const

Drop the first N elements of the array.

size_t size() const

size - Get the array size.

ArrayRef< T > drop_back(size_t N=1) const

Drop the last N elements of the array.

A cache of @llvm.assume calls within a function.

LLVM Basic Block Representation.

InstructionCost getFPOpCost(Type *Ty) const override

Definition BasicTTIImpl.h:674

bool preferToKeepConstantsAttached(const Instruction &Inst, const Function &Fn) const override

Definition BasicTTIImpl.h:684

InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, bool UseMaskForCond=false, bool UseMaskForGaps=false) const override

Definition BasicTTIImpl.h:1560

InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, const Value *Op0, const Value *Op1) const override

Definition BasicTTIImpl.h:1435

InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override

Definition BasicTTIImpl.h:1045

InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, TTI::TargetCostKind CostKind) const override

Try to calculate op costs for min/max reduction operations.

Definition BasicTTIImpl.h:3231

bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty) const override

Definition BasicTTIImpl.h:507

InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind) const override

Definition BasicTTIImpl.h:569

unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const override

Definition BasicTTIImpl.h:707

InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override

Definition BasicTTIImpl.h:1378

TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const override

Definition BasicTTIImpl.h:887

bool shouldBuildLookupTables() const override

Definition BasicTTIImpl.h:634

InstructionCost getScalarizationOverhead(VectorType *InTy, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}) const override

Estimate the overhead of scalarizing an instruction.

Definition BasicTTIImpl.h:900
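
A hedged usage sketch of the DemandedElts overload above, computing the cost of extracting every lane and inserting each one back (in real code the TTI object comes from the pass's analyses; the helper name is illustrative):

#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"

using namespace llvm;

static InstructionCost roundTripCost(const TargetTransformInfo &TTI,
                                     FixedVectorType *VTy) {
  // Demand every lane: pay for one extract and one insert per element.
  APInt AllLanes = APInt::getAllOnes(VTy->getNumElements());
  return TTI.getScalarizationOverhead(
      VTy, AllLanes, /*Insert=*/true, /*Extract=*/true,
      TargetTransformInfo::TCK_RecipThroughput);
}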

bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override

Definition BasicTTIImpl.h:435

bool isProfitableToHoist(Instruction *I) const override

Definition BasicTTIImpl.h:553

unsigned getNumberOfParts(Type *Tp) const override

Definition BasicTTIImpl.h:3081

unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const override

Definition BasicTTIImpl.h:861

InstructionCost getVectorInstrCost(const Instruction &I, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const override

Definition BasicTTIImpl.h:1455

bool useAA() const override

Definition BasicTTIImpl.h:557

unsigned getPrefetchDistance() const override

Definition BasicTTIImpl.h:857

TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind, ArrayRef< int > Mask, VectorType *SrcTy, int &Index, VectorType *&SubTy) const

Definition BasicTTIImpl.h:1123

unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy, Type *ScalarValTy) const override

Definition BasicTTIImpl.h:487

bool isLegalAddScalableImmediate(int64_t Imm) const override

Definition BasicTTIImpl.h:462

unsigned getAssumedAddrSpace(const Value *V) const override

Definition BasicTTIImpl.h:439

std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const override

Definition BasicTTIImpl.h:819

bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace, Instruction *I=nullptr, int64_t ScalableOffset=0) const override

Definition BasicTTIImpl.h:470

bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const override

Definition BasicTTIImpl.h:421

bool areInlineCompatible(const Function *Caller, const Function *Callee) const override

Definition BasicTTIImpl.h:395

bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty) const override

Definition BasicTTIImpl.h:512

bool haveFastSqrt(Type *Ty) const override

Definition BasicTTIImpl.h:665

bool collectFlatAddressOperands(SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const override

Definition BasicTTIImpl.h:430

InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask, TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const override

Definition BasicTTIImpl.h:1179

InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, Value *Scalar, ArrayRef< std::tuple< Value *, User *, int > > ScalarUserAndIdx) const override

Definition BasicTTIImpl.h:1446

unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JumpTableSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const override

Definition BasicTTIImpl.h:575

Value * rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const override

Definition BasicTTIImpl.h:453

unsigned adjustInliningThreshold(const CallBase *CB) const override

Definition BasicTTIImpl.h:704

unsigned getInliningThresholdMultiplier() const override

Definition BasicTTIImpl.h:703

int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset)

Definition BasicTTIImpl.h:483

bool shouldBuildRelLookupTables() const override

Definition BasicTTIImpl.h:640

bool isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID, int RetIdx) const override

Definition BasicTTIImpl.h:948

InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override

Definition BasicTTIImpl.h:3219

InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Op2Info={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override

Definition BasicTTIImpl.h:1383

InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace) const override

Definition BasicTTIImpl.h:534

unsigned getEpilogueVectorizationMinVF() const override

Definition BasicTTIImpl.h:800

InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index, TTI::TargetCostKind CostKind) const override

Definition BasicTTIImpl.h:1368

InstructionCost getVectorSplitCost() const

Definition BasicTTIImpl.h:3329

bool isTruncateFree(Type *Ty1, Type *Ty2) const override

Definition BasicTTIImpl.h:549

std::optional< unsigned > getMaxVScale() const override

Definition BasicTTIImpl.h:891

unsigned getFlatAddressSpace() const override

Definition BasicTTIImpl.h:425

InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const override

Compute a cost of the given call instruction.

Definition BasicTTIImpl.h:3076

void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, TTI::UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE) const override

Definition BasicTTIImpl.h:714

InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind) const

Try to calculate arithmetic and shuffle op costs for reduction intrinsics.

Definition BasicTTIImpl.h:3125
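
A back-of-the-envelope instance of that tree strategy, assuming unit costs purely for illustration: the vector is halved log2(VF) times, each round costing one shuffle plus one vector op, and the scalar result is then read from lane 0.

#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>

using namespace llvm;

static InstructionCost treeReductionSketch(unsigned NumElts) {
  assert(isPowerOf2_32(NumElts) && "tree reduction halves the vector");
  unsigned Rounds = Log2_32(NumElts);   // e.g. 3 halving rounds for 8 lanes
  InstructionCost ShuffleCost = Rounds; // one half-extract shuffle per round
  InstructionCost ArithCost = Rounds;   // one vector op per round
  InstructionCost ExtractCost = 1;      // extractelement of lane 0
  return ShuffleCost + ArithCost + ExtractCost; // 7 for NumElts == 8
}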

~BasicTTIImplBase() override = default

std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const override

Definition BasicTTIImpl.h:449

unsigned getMaxPrefetchIterationsAhead() const override

Definition BasicTTIImpl.h:869

void getPeelingPreferences(Loop *L, ScalarEvolution &SE, TTI::PeelingPreferences &PP) const override

Definition BasicTTIImpl.h:786

InstructionCost getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const

Get intrinsic cost based on argument types.

Definition BasicTTIImpl.h:2176

bool hasBranchDivergence(const Function *F=nullptr) const override

Definition BasicTTIImpl.h:409

InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty, TTI::TargetCostKind CostKind) const

Try to calculate the cost of performing strict (in-order) reductions, which involves doing a sequence...

Definition BasicTTIImpl.h:3201
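
In contrast to the tree scheme above, a strict reduction must preserve source order, so the fallback shape is a chain of scalar ops and the cost grows linearly with the lane count. An illustrative unit-cost sketch:

#include "llvm/Support/InstructionCost.h"

using namespace llvm;

// <4 x float> in-order fadd reduction:
//   acc = fadd(fadd(fadd(fadd(start, l0), l1), l2), l3)
static InstructionCost orderedReductionSketch(unsigned NumElts) {
  InstructionCost PerScalarOp = 1; // assumed unit cost of one scalar fadd
  return PerScalarOp * NumElts;    // the real model also charges extracts
}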

bool isTargetIntrinsicTriviallyScalarizable(Intrinsic::ID ID) const override

Definition BasicTTIImpl.h:932

bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const override

Definition BasicTTIImpl.h:804

std::optional< unsigned > getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const override

Definition BasicTTIImpl.h:843

bool shouldPrefetchAddressSpace(unsigned AS) const override

Definition BasicTTIImpl.h:877

bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace, Align Alignment, unsigned *Fast) const override

Definition BasicTTIImpl.h:387

unsigned getCacheLineSize() const override

Definition BasicTTIImpl.h:853

std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const override

Definition BasicTTIImpl.h:814

bool shouldDropLSRSolutionIfLessProfitable() const override

Definition BasicTTIImpl.h:526

int getInlinerVectorBonusPercent() const override

Definition BasicTTIImpl.h:712

bool isVScaleKnownToBeAPowerOfTwo() const override

Definition BasicTTIImpl.h:895

InstructionCost getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind) const override

Definition BasicTTIImpl.h:3308

InstructionCost getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const override

Definition BasicTTIImpl.h:1469

InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const override

Definition BasicTTIImpl.h:1208

std::pair< InstructionCost, MVT > getTypeLegalizationCost(Type *Ty) const

Estimate the cost of type-legalization and the legalized type.

Definition BasicTTIImpl.h:1009
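
The returned pair is consumed with a recurring idiom in this header: the first element counts legalization splits and scales per-part costs, the second names the legalized machine type. A sketch of that consumption (PerPartCost stands in for a target-specific estimate and is not an LLVM API):

#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/Support/InstructionCost.h"
#include <utility>

using namespace llvm;

static InstructionCost scaleByLegalization(std::pair<InstructionCost, MVT> LT,
                                           InstructionCost PerPartCost) {
  // LT.first: number of legal parts the type splits into;
  // LT.second: the legalized MVT each part has after legalization.
  return LT.first * PerPartCost;
}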

bool isLegalAddImmediate(int64_t imm) const override

Definition BasicTTIImpl.h:458

InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts, TTI::TargetCostKind CostKind) const override

Definition BasicTTIImpl.h:1483

unsigned getMaxInterleaveFactor(ElementCount VF) const override

Definition BasicTTIImpl.h:1043

bool isSingleThreaded() const override

Definition BasicTTIImpl.h:443

InstructionCost getScalarizationOverhead(VectorType *InTy, bool Insert, bool Extract, TTI::TargetCostKind CostKind) const

Helper wrapper for the DemandedElts variant of getScalarizationOverhead.

Definition BasicTTIImpl.h:954

bool isProfitableLSRChainElement(Instruction *I) const override

Definition BasicTTIImpl.h:530

bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override

Definition BasicTTIImpl.h:417

bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx) const override

Definition BasicTTIImpl.h:942

bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx) const override

Definition BasicTTIImpl.h:937

std::optional< unsigned > getVScaleForTuning() const override

Definition BasicTTIImpl.h:892

InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind) const override

Definition BasicTTIImpl.h:3279

InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const override

Get intrinsic cost based on arguments.

Definition BasicTTIImpl.h:1708

TailFoldingStyle getPreferredTailFoldingStyle(bool IVUpdateMayOverflow=true) const override

Definition BasicTTIImpl.h:809

std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const override

Definition BasicTTIImpl.h:826

InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *, const SCEV *, TTI::TargetCostKind) const override

Definition BasicTTIImpl.h:3099

bool isSourceOfDivergence(const Value *V) const override

Definition BasicTTIImpl.h:413

bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const override

Definition BasicTTIImpl.h:672

InstructionCost getScalarizationOverhead(VectorType *RetTy, ArrayRef< const Value * > Args, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const

Estimate the overhead of scalarizing the inputs and outputs of an instruction, with return type RetTy...

Definition BasicTTIImpl.h:990

std::optional< unsigned > getCacheSize(TargetTransformInfo::CacheLevel Level) const override

Definition BasicTTIImpl.h:837

bool isAlwaysUniform(const Value *V) const override

Definition BasicTTIImpl.h:415

bool isLegalICmpImmediate(int64_t imm) const override

Definition BasicTTIImpl.h:466

bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const override

Definition BasicTTIImpl.h:794

unsigned getRegUsageForType(Type *Ty) const override

Definition BasicTTIImpl.h:564

InstructionCost getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA, TTI::TargetCostKind CostKind) const override

Get memory intrinsic cost based on arguments.

Definition BasicTTIImpl.h:3004

BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)

Definition BasicTTIImpl.h:378

InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo={TTI::OK_AnyValue, TTI::OP_None}, const Instruction *I=nullptr) const override

Definition BasicTTIImpl.h:1515

bool isTypeLegal(Type *Ty) const override

Definition BasicTTIImpl.h:559

bool enableWritePrefetching() const override

Definition BasicTTIImpl.h:873

bool isLSRCostLess(const TTI::LSRCost &C1, const TTI::LSRCost &C2) const override

Definition BasicTTIImpl.h:517

InstructionCost getOperandsScalarizationOverhead(ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const override

Estimate the overhead of scalarizing an instruction's operands.

Definition BasicTTIImpl.h:969

bool isNumRegsMajorCostOfLSR() const override

Definition BasicTTIImpl.h:522

BasicTTIImpl(const TargetMachine *TM, const Function &F)

size_type count() const

count - Returns the number of bits which are set.

BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...

Base class for all callable instructions (InvokeInst and CallInst). Holds everything related to callin...

static Type * makeCmpResultType(Type *opnd_type)

Create a result type for fcmp/icmp.

Predicate

This enumeration lists the possible predicates for CmpInst subclasses.

@ ICMP_UGT

unsigned greater than

@ ICMP_SGT

signed greater than

@ ICMP_ULT

unsigned less than

@ FCMP_UNO

1 0 0 0 True if unordered: isnan(X) | isnan(Y)

static CmpInst::Predicate getGTPredicate(Intrinsic::ID ID)

static CmpInst::Predicate getLTPredicate(Intrinsic::ID ID)

This class represents a range of values.

A parsed version of the target data layout string in and methods for querying it.

constexpr bool isVector() const

One or more elements.

static constexpr ElementCount getFixed(ScalarTy MinVal)

constexpr bool isScalar() const

Exactly one element.
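
A small sketch contrasting the two flavours; ElementCount::getScalable is the scalable counterpart of the getFixed factory shown above:

#include "llvm/Support/TypeSize.h"

using namespace llvm;

static void elementCountDemo() {
  ElementCount Fixed4 = ElementCount::getFixed(4);   // <4 x ty>
  ElementCount Scal4 = ElementCount::getScalable(4); // <vscale x 4 x ty>
  (void)Fixed4.isVector();  // true: one or more elements
  (void)Fixed4.isScalar();  // false: not exactly one element
  (void)Scal4.isScalable(); // true: scaled by the runtime vscale
}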

Convenience struct for specifying and reasoning about fast-math flags.

Container class for subtarget features.

Class to represent fixed width SIMD vectors.

unsigned getNumElements() const

static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)

AttributeList getAttributes() const

Return the attribute list for this Function.

The core instruction combiner logic.

static InstructionCost getInvalid(CostType Val=0)

unsigned getOpcode() const

Returns a member of one of the enums like Instruction::Add.

static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)

This static method is the primary way of constructing an IntegerType.

FastMathFlags getFlags() const

const TargetLibraryInfo * getLibInfo() const

const SmallVectorImpl< Type * > & getArgTypes() const

Type * getReturnType() const

bool skipScalarizationCost() const

const SmallVectorImpl< const Value * > & getArgs() const

InstructionCost getScalarizationCost() const

const IntrinsicInst * getInst() const

Intrinsic::ID getID() const

bool isTypeBasedOnly() const

A wrapper class for inspecting calls to intrinsic functions.

This is an important class for using LLVM in a threaded context.

Represents a single loop in the control flow graph.

const FeatureBitset & getFeatureBits() const

TypeSize getStoreSize() const

Return the number of bytes overwritten by a store of the specified value type.

Information for memory intrinsic cost model.

Align getAlignment() const

Type * getDataType() const

bool getVariableMask() const

Intrinsic::ID getID() const

static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)

This constructs a pointer to an object of the specified type in a numbered address space.

Analysis providing profile information.

This class represents an analyzed expression in the program.

The main scalar evolution driver.

static LLVM_ABI bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)

Return true if this shuffle mask chooses all elements with the same value as the first element of exa...

static LLVM_ABI bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)

Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...

static LLVM_ABI bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)

Return true if this shuffle mask chooses elements from its source vectors without lane crossings.

static LLVM_ABI bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)

Return true if this shuffle mask is an extract subvector mask.

static LLVM_ABI bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)

Return true if this shuffle mask swaps the order of elements from exactly one source vector.

static LLVM_ABI bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)

Return true if this shuffle mask is a transpose mask.

static LLVM_ABI bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)

Return true if this shuffle mask is an insert subvector mask.
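
Concrete instances of these classifiers on 4-element masks, as a sketch (all are static members of ShuffleVectorInst):

#include "llvm/IR/Instructions.h"
#include <cassert>

using namespace llvm;

static void maskDemos() {
  int Index, NumSubElts;
  // <3,2,1,0> reverses a single 4-element source.
  assert(ShuffleVectorInst::isReverseMask({3, 2, 1, 0}, /*NumSrcElts=*/4));
  // <2,3,4,5> is a window into concat(A, B) starting at element 2.
  assert(ShuffleVectorInst::isSpliceMask({2, 3, 4, 5}, 4, Index) &&
         Index == 2);
  // <0,5,2,7> takes each lane from A or B without crossing lanes.
  assert(ShuffleVectorInst::isSelectMask({0, 5, 2, 7}, 4));
  // <0,1,4,5> inserts a 2-element subvector of B into A at offset 2.
  assert(ShuffleVectorInst::isInsertSubvectorMask({0, 1, 4, 5}, 4, NumSubElts,
                                                  Index) &&
         NumSubElts == 2 && Index == 2);
}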

std::pair< iterator, bool > insert(PtrType Ptr)

Inserts Ptr if and only if there is no element in the container equal to Ptr.

SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

StackOffset holds a fixed and a scalable offset in bytes.

static StackOffset getScalable(int64_t Scalable)

static StackOffset getFixed(int64_t Fixed)

static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)

This creates an identified struct.

Provides information about what library functions are available for the current target.

This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...

bool isOperationExpand(unsigned Op, EVT VT) const

Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...

int InstructionOpcodeToISD(unsigned Opcode) const

Get the ISD node that corresponds to the Instruction class opcode.

EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const

Return the EVT corresponding to this LLVM type.

LegalizeAction

This enum indicates whether operations are valid for a target, and if not, what action should be used...

virtual bool preferSelectsOverBooleanArithmetic(EVT VT) const

Should we prefer selects to doing arithmetic on boolean types.

virtual bool isZExtFree(Type *FromTy, Type *ToTy) const

Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...

@ TypeScalarizeScalableVector

virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases, uint64_t Range, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const

Return true if lowering to a jump table is suitable for a set of case clusters which may contain NumC...

virtual bool areJTsAllowed(const Function *Fn) const

Return true if lowering to a jump table is allowed.

bool isOperationLegalOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const

Return true if the specified operation is legal on this target or can be made legal using promotion.

bool isOperationCustom(unsigned Op, EVT VT) const

Return true if the operation uses custom lowering, regardless of whether the type is legal or not.

bool isSuitableForBitTests(const DenseMap< const BasicBlock *, unsigned int > &DestCmps, const APInt &Low, const APInt &High, const DataLayout &DL) const

Return true if lowering to a bit test is suitable for a set of case clusters which contains NumDests ...

virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const

Return true if it's free to truncate a value of type FromTy to type ToTy.

bool isTypeLegal(EVT VT) const

Return true if the target has native support for the specified value type.

virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const

Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g.

bool isOperationLegal(unsigned Op, EVT VT) const

Return true if the specified operation is legal on this target.

LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const

Return how this store with truncation should be treated: either it is legal, needs to be promoted to ...

LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const

Return how this load with extension should be treated: either it is legal, needs to be promoted to a ...

bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const

Return true if the specified operation is legal on this target or can be made legal with custom lower...

bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const

Return true if the specified load with extension is legal on this target.

LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const

Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...

virtual bool isFAbsFree(EVT VT) const

Return true if an fabs operation is free to the point where it is never worthwhile to replace it with...

bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const

Return true if the specified operation is legal on this target or can be made legal with custom lower...

std::pair< LegalizeTypeAction, EVT > LegalizeKind

LegalizeKind holds the legalization kind that needs to happen to EVT in order to type-legalize it.
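
These legality queries feed a recurring idiom in this file: map the IR opcode to an ISD node, map the IR type to an EVT, then ask the lowering object how the pair will be handled. A standalone sketch of that idiom (in the real code TLI and DL are already in scope; here they are parameters):

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"

using namespace llvm;

static bool opIsCheap(const TargetLoweringBase &TLI, const DataLayout &DL,
                      unsigned Opcode, Type *Ty) {
  int ISDOpc = TLI.InstructionOpcodeToISD(Opcode); // IR opcode -> ISD node
  EVT VT = TLI.getValueType(DL, Ty);               // IR type   -> EVT
  // Legal, custom-lowered, or promotable operations are modelled as cheap;
  // anything the target must expand is not.
  return ISDOpc && TLI.isOperationLegalOrCustomOrPromote(ISDOpc, VT);
}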

Primary interface to the complete machine description for the target machine.

bool isPositionIndependent() const

const Triple & getTargetTriple() const

virtual const TargetSubtargetInfo * getSubtargetImpl(const Function &) const

Virtual method implemented by subclasses that returns a reference to that target's TargetSubtargetInf...

CodeModel::Model getCodeModel() const

Returns the code model.

TargetSubtargetInfo - Generic base class for all target subtargets.

virtual bool isProfitableLSRChainElement(Instruction *I) const

virtual const DataLayout & getDataLayout() const

virtual std::optional< unsigned > getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const

virtual std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const

virtual bool shouldDropLSRSolutionIfLessProfitable() const

virtual bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const

virtual bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const

virtual std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const

virtual std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const

virtual unsigned getEpilogueVectorizationMinVF() const

virtual bool isLoweredToCall(const Function *F) const

virtual InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Opd1Info, TTI::OperandValueInfo Opd2Info, ArrayRef< const Value * > Args, const Instruction *CxtI=nullptr) const

virtual InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I=nullptr) const

virtual bool isLSRCostLess(const TTI::LSRCost &C1, const TTI::LSRCost &C2) const

virtual InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I) const

virtual InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const

virtual InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info, const Instruction *I) const

virtual bool isNumRegsMajorCostOfLSR() const

virtual TailFoldingStyle getPreferredTailFoldingStyle(bool IVUpdateMayOverflow=true) const

TargetTransformInfoImplCRTPBase(const DataLayout &DL)

InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType, TTI::TargetCostKind CostKind) const override

This pass provides access to the codegen interfaces that are needed for IR-level transformations.

static LLVM_ABI OperandValueInfo getOperandInfo(const Value *V)

Collect properties of V used in cost analysis, e.g. OP_PowerOf2.

TargetCostKind

The kind of cost model.

@ TCK_RecipThroughput

Reciprocal throughput.

@ TCK_CodeSize

Instruction code size.

static bool requiresOrderedReduction(std::optional< FastMathFlags > FMF)

A helper function to determine the type of reduction algorithm used for a given Opcode and set of Fas...

@ TCC_Expensive

The cost of a 'div' instruction on x86.

@ TCC_Basic

The cost of a typical 'add' instruction.

MemIndexedMode

The type of load/store indexing.

@ MIM_Unindexed

No indexing.

@ MIM_PostInc

Post-incrementing.

@ MIM_PostDec

Post-decrementing.

@ MIM_PreDec

Pre-decrementing.

@ MIM_PreInc

Pre-incrementing.

ShuffleKind

The various kinds of shuffle patterns for vector queries.

@ SK_InsertSubvector

InsertSubvector. Index indicates start offset.

@ SK_Select

Selects elements from the corresponding lane of either source operand.

@ SK_PermuteSingleSrc

Shuffle elements of single source vector with any shuffle mask.

@ SK_Transpose

Transpose two vectors.

@ SK_Splice

Concatenates elements from the first input vector with elements of the second input vector.

@ SK_Broadcast

Broadcast element 0 to all other elements.

@ SK_PermuteTwoSrc

Merge elements from two source vectors into one with any shuffle mask.

@ SK_Reverse

Reverse the order of the vector.

@ SK_ExtractSubvector

ExtractSubvector. Index indicates start offset.

CastContextHint

Represents a hint about the context in which a cast is used.

@ None

The cast is not used with a load/store of any kind.

@ Normal

The cast is used with a normal load/store.

@ OK_UniformConstantValue

CacheLevel

The possible cache levels.

Triple - Helper class for working with autoconf configuration names.

ArchType getArch() const

Get the parsed architecture type of this triple.

LLVM_ABI bool isArch64Bit() const

Test whether the architecture is 64-bit.

bool isOSDarwin() const

Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, DriverKit, XROS, or bridgeOS).

static constexpr TypeSize getFixed(ScalarTy ExactSize)

The instances of the Type class are immutable: once they are created, they are never changed.

bool isVectorTy() const

True if this is an instance of VectorType.

static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)

Type * getScalarType() const

If this is a vector type, return the element type, otherwise return 'this'.

LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const

Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old numb...

LLVMContext & getContext() const

Return the LLVMContext in which this type was uniqued.

LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY

If this is a vector type, return the getPrimitiveSizeInBits value for the element type.

static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)

static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)

bool isFPOrFPVectorTy() const

Return true if this is a FP type or a vector of FP.

Type * getContainedType(unsigned i) const

This method is used to implement the type iterator (defined at the end of the file).

bool isVoidTy() const

Return true if this is 'void'.

Value * getOperand(unsigned i) const

static LLVM_ABI bool isVPBinOp(Intrinsic::ID ID)

static LLVM_ABI bool isVPCast(Intrinsic::ID ID)

static LLVM_ABI bool isVPCmp(Intrinsic::ID ID)

static LLVM_ABI std::optional< unsigned > getFunctionalOpcodeForVP(Intrinsic::ID ID)

static LLVM_ABI std::optional< Intrinsic::ID > getFunctionalIntrinsicIDForVP(Intrinsic::ID ID)

static LLVM_ABI bool isVPIntrinsic(Intrinsic::ID)

static LLVM_ABI bool isVPReduction(Intrinsic::ID ID)

LLVM Value Representation.

Type * getType() const

All values are typed, get the type of this value.

Base class of all SIMD vector types.

static VectorType * getHalfElementsVectorType(VectorType *VTy)

This static method returns a VectorType with half as many elements as the input type and the same ele...

static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)

This static method is the primary way to construct an VectorType.

Type * getElementType() const

constexpr ScalarTy getFixedValue() const

static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)

constexpr bool isScalable() const

Returns whether the quantity is scaled by a runtime quantity (vscale).

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

constexpr char Args[]

Key for Kernel::Metadata::mArgs.

LLVM_ABI APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth, bool MatchAllBits=false)

Splat/Merge neighboring bits to widen/narrow the bitmask represented by A.

unsigned ID

LLVM IR allows to use arbitrary numbers as calling convention identifiers.

@ Fast

Attempts to make calls as fast as possible (e.g.

@ C

The default llvm calling convention, compatible with C.

ISD namespace - This namespace contains an enum which represents all of the SelectionDAG node types a...

@ BSWAP

Byte Swap and Counting operators.

@ SMULFIX

RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...

@ FMA

FMA - Perform a * b + c with no intermediate rounding step.

@ FADD

Simple binary floating point operators.

@ ABS

ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.

@ SDIVREM

SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.

@ SSUBO

Same for subtraction.

@ FCANONICALIZE

Returns platform specific canonical encoding of a floating point number.

@ SSUBSAT

RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...

@ SELECT

Select(COND, TRUEVAL, FALSEVAL).

@ SADDO

RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.

@ SMULO

Same for multiplication.

@ SMIN

[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.

@ VSELECT

Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...

@ SCMP

[US]CMP - 3-way comparison of signed or unsigned integers.

@ FP_TO_SINT_SAT

FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...

@ FCOPYSIGN

FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.

@ SADDSAT

RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...

MemIndexedMode

MemIndexedMode enum - This enum defines the load / store indexed addressing modes.

LLVM_ABI bool isTargetIntrinsic(ID IID)

isTargetIntrinsic - Returns true if IID is an intrinsic specific to a certain target.

LLVM_ABI Libcall getSINCOSPI(EVT RetVT)

getSINCOSPI - Return the SINCOSPI_* value for the given types, or UNKNOWN_LIBCALL if there is none.

LLVM_ABI Libcall getMODF(EVT VT)

getMODF - Return the MODF_* value for the given types, or UNKNOWN_LIBCALL if there is none.

LLVM_ABI Libcall getSINCOS(EVT RetVT)

getSINCOS - Return the SINCOS_* value for the given types, or UNKNOWN_LIBCALL if there is none.

DiagnosticInfoOptimizationBase::Argument NV

friend class Instruction

Iterator for Instructions in a BasicBlock.

This is an optimization pass for GlobalISel generic memory operations.

bool all_of(R &&range, UnaryPredicate P)

Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.

LLVM_ABI Intrinsic::ID getMinMaxReductionIntrinsicOp(Intrinsic::ID RdxID)

Returns the min/max intrinsic used when expanding a min/max reduction.

detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)

zip iterator that assumes that all iteratees have the same length.

auto enumerate(FirstRange &&First, RestRanges &&...Rest)

Given two or more input ranges, returns a new range whose values are tuples (A, B,...

Type * toScalarizedTy(Type *Ty)

A helper for converting vectorized types to scalarized (non-vector) types.

decltype(auto) dyn_cast(const From &Val)

dyn_cast - Return the argument parameter cast to the specified type.

auto dyn_cast_if_present(const Y &Val)

dyn_cast_if_present - Functionally identical to dyn_cast, except that a null (or none in the case ...

LLVM_ABI unsigned getArithmeticReductionInstruction(Intrinsic::ID RdxID)

Returns the arithmetic instruction opcode used when expanding a reduction.

bool isVectorizedTy(Type *Ty)

Returns true if Ty is a vector type or a struct of vector types where all vector types share the same...

detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)

Returns a concatenated range across two or more ranges.

auto dyn_cast_or_null(const Y &Val)

constexpr bool has_single_bit(T Value) noexcept

bool any_of(R &&range, UnaryPredicate P)

Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.

unsigned Log2_32(uint32_t Value)

Return the floor log base 2 of the specified value, -1 if the value is zero.

constexpr bool isPowerOf2_32(uint32_t Value)

Return true if the argument is a power of two > 0.

ElementCount getVectorizedTypeVF(Type *Ty)

Returns the number of vector elements for a vectorized type.

LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)

Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...

class LLVM_GSL_OWNER SmallVector

Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...

bool isa(const From &Val)

isa - Return true if the parameter to the template is an instance of one of the template type argu...

constexpr int PoisonMaskElem

constexpr T divideCeil(U Numerator, V Denominator)

Returns the integer ceil(Numerator / Denominator).

@ UMin

Unsigned integer min implemented in terms of select(cmp()).

@ UMax

Unsigned integer max implemented in terms of select(cmp()).

DWARFExpression::Operation Op

ArrayRef(const T &OneElt) -> ArrayRef< T >

constexpr unsigned BitWidth

decltype(auto) cast(const From &Val)

cast - Return the argument parameter cast to the specified type.

ArrayRef< Type * > getContainedTypes(Type *const &Ty)

Returns the types contained in Ty.

cl::opt< unsigned > PartialUnrollingThreshold

LLVM_ABI bool isVectorizedStructTy(StructType *StructTy)

Returns true if StructTy is an unpacked literal struct where all elements are vectors of matching ele...

This struct is a compact representation of a valid (non-zero power of two) alignment.

bool isSimple() const

Test if the given EVT is simple (as opposed to being extended).

ElementCount getVectorElementCount() const

static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)

Return the value type corresponding to the specified type.

MVT getSimpleVT() const

Return the SimpleValueType held in the specified simple EVT.

static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)

Returns the EVT that represents an integer with the given number of bits.

LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const

This method returns an LLVM type corresponding to the specified EVT.

Attributes of a target dependent hardware loop.

static bool hasVectorMaskArgument(RTLIB::LibcallImpl Impl)

Returns true if the function has a vector mask argument, which is assumed to be the last argument.

This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...

bool AllowPeeling

Allow peeling off loop iterations.

bool AllowLoopNestsPeeling

Allow peeling off loop iterations for loop nests.

bool PeelProfiledIterations

Allow peeling based on profile.

unsigned PeelCount

A forced peeling factor (the number of bodies of the original loop that should be peeled off before t...

Parameters that control the generic loop unrolling transformation.

bool UpperBound

Allow using trip count upper bound to unroll loops.

unsigned PartialOptSizeThreshold

The cost threshold for the unrolled loop when optimizing for size, like OptSizeThreshold,...

unsigned PartialThreshold

The cost threshold for the unrolled loop, like Threshold, but used for partial/runtime unrolling (set...

bool Runtime

Allow runtime unrolling (unrolling of loops to expand the size of the loop body even when the number ...

bool Partial

Allow partial unrolling (unrolling of loops to expand the size of the loop body, not only to eliminat...

unsigned OptSizeThreshold

The cost threshold for the unrolled loop when optimizing for size (set to UINT_MAX to disable).
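
As an illustration of how the peeling and unrolling knobs documented above are used, here is the shape of a target tuning them inside its preference hooks. The values are made up for the example and are not any in-tree target's policy:

#include "llvm/Analysis/TargetTransformInfo.h"

using namespace llvm;

static void tuneUnrolling(TargetTransformInfo::UnrollingPreferences &UP) {
  UP.Partial = true;              // allow partial unrolling
  UP.Runtime = true;              // allow runtime trip-count unrolling
  UP.PartialThreshold = 200;      // size budget for partial unrolling
  UP.PartialOptSizeThreshold = 0; // but not when optimizing for size
  UP.UpperBound = true;           // may unroll using the trip-count upper bound
}

static void tunePeeling(TargetTransformInfo::PeelingPreferences &PP) {
  PP.AllowPeeling = true; // permit peeling off early iterations
  PP.PeelCount = 0;       // no forced peel factor
}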