LLVM: lib/Analysis/TargetTransformInfo.cpp Source File

1//===- TargetTransformInfo.cpp --------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8

24#include

25#include

26

27using namespace llvm;

29

30#define DEBUG_TYPE "tti"

31

32static cl::opt<bool> EnableReduxCost("costmodel-reduxcost", cl::init(false),
33                                     cl::Hidden,
34                                     cl::desc("Recognize reduction patterns."));
35
36static cl::opt<unsigned> CacheLineSize(
37    "cache-line-size", cl::init(0), cl::Hidden,
38    cl::desc("Use this to override the target cache line size when "
39             "specified by the user."));
40
41static cl::opt<unsigned> MinPageSize(
42    "min-page-size", cl::init(0), cl::Hidden,
43    cl::desc("Use this to override the target's minimum page size."));
44
45static cl::opt<unsigned> PredictableBranchThreshold(
46    "predictable-branch-threshold", cl::init(99), cl::Hidden,
47    cl::desc(
48        "Use this to override the target's predictable branch threshold (%)."));
49

50namespace {

51

52

53

54

58};

59}

60

62 std::unique_ptr Impl)

63 : TTIImpl(std::move(Impl)) {}

64

66

67

71 return false;

72 return true;

73}

74

79 ScalarizationCost(ScalarizationCost), LibInfo(LibInfo) {

80

82 FMF = FPMO->getFastMathFlags();

83

84 if (!TypeBasedOnly)

85 Arguments.insert(Arguments.begin(), CI.arg_begin(), CI.arg_end());

87 ParamTys.insert(ParamTys.begin(), FTy->param_begin(), FTy->param_end());

88}

89

95 : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {

96 ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());

97}

98

101 : RetTy(Ty), IID(Id) {

102

103 Arguments.insert(Arguments.begin(), Args.begin(), Args.end());

104 ParamTys.reserve(Arguments.size());

107}

108

113 : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost),

114 LibInfo(LibInfo) {

115 ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());

116 Arguments.insert(Arguments.begin(), Args.begin(), Args.end());

117}

118

126

132 L->getExitingBlocks(ExitingBlocks);

133

134 for (BasicBlock *BB : ExitingBlocks) {

135

136

137 if (L->isLoopLatch(BB)) {

139 continue;

140 }

141

144 continue;

146 if (ConstEC->getValue()->isZero())

147 continue;

149 continue;

150

152 continue;

153

154

155

156

158 continue;

159

160

161

162

163

164

165

166

167 bool NotAlways = false;

169 if (L->contains(Pred))

170 continue;

171

173 NotAlways = true;

174 break;

175 }

176 }

177

178 if (NotAlways)

179 continue;

180

181

183 if (!TI)

184 continue;

185

187 if (!BI->isConditional())

188 continue;

189

191 } else

192 continue;

193

194

195

198 break;

199 }

200

202 return false;

203 return true;

204}

205

207 : TTIImpl(std::make_unique(DL)) {}

208

210

212 : TTIImpl(std::move(Arg.TTIImpl)) {}

213

215 TTIImpl = std::move(RHS.TTIImpl);

216 return *this;

217}

218

220 return TTIImpl->getInliningThresholdMultiplier();

221}

222

223unsigned

225 return TTIImpl->getInliningCostBenefitAnalysisSavingsMultiplier();

226}

227

228unsigned

230 const {

231 return TTIImpl->getInliningCostBenefitAnalysisProfitableMultiplier();

232}

233

235 return TTIImpl->getInliningLastCallToStaticBonus();

236}

237

238unsigned

240 return TTIImpl->adjustInliningThreshold(CB);

241}

242

245 return TTIImpl->getCallerAllocaCost(CB, AI);

246}

247

249 return TTIImpl->getInlinerVectorBonusPercent();

250}

251

255 return TTIImpl->getGEPCost(PointeeType, Ptr, Operands, AccessType, CostKind);

256}

257

262 assert((Base || !Info.isSameBase()) &&

263 "If pointers have same base address it has to be provided.");

264 return TTIImpl->getPointersChainCost(Ptrs, Base, Info, AccessTy, CostKind);

265}

266

270 return TTIImpl->getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);

271}

272

279 "TTI should not produce negative costs!");

281}

282

286 : TTIImpl->getPredictableBranchThreshold();

287}

288

290 return TTIImpl->getBranchMispredictPenalty();

291}

292

294 return TTIImpl->hasBranchDivergence(F);

295}

296

299

301 if (Call->hasFnAttr(Attribute::NoDivergenceSource))

303 }

304 return TTIImpl->getInstructionUniformity(V);

305}

306

308 unsigned ToAS) const {

309 return TTIImpl->isValidAddrSpaceCast(FromAS, ToAS);

310}

311

313 unsigned ToAS) const {

314 return TTIImpl->addrspacesMayAlias(FromAS, ToAS);

315}

316

318 return TTIImpl->getFlatAddressSpace();

319}

320

323 return TTIImpl->collectFlatAddressOperands(OpIndexes, IID);

324}

325

327 unsigned ToAS) const {

328 return TTIImpl->isNoopAddrSpaceCast(FromAS, ToAS);

329}

330

332 unsigned AS) const {

333 return TTIImpl->canHaveNonUndefGlobalInitializerInAddressSpace(AS);

334}

335

337 return TTIImpl->getAssumedAddrSpace(V);

338}

339

341 return TTIImpl->isSingleThreaded();

342}

343

344std::pair<const Value *, unsigned>

346 return TTIImpl->getPredicatedAddrSpace(V);

347}

348

351 return TTIImpl->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);

352}

353

355 return TTIImpl->isLoweredToCall(F);

356}

357

361 return TTIImpl->isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);

362}

363

365 return TTIImpl->getEpilogueVectorizationMinVF();

366}

367

370 return TTIImpl->preferPredicateOverEpilogue(TFI);

371}

372

374 bool IVUpdateMayOverflow) const {

375 return TTIImpl->getPreferredTailFoldingStyle(IVUpdateMayOverflow);

376}

377

378std::optional<Instruction *>

381 return TTIImpl->instCombineIntrinsic(IC, II);

382}

383

386 bool &KnownBitsComputed) const {

387 return TTIImpl->simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,

388 KnownBitsComputed);

389}

390

393 APInt &UndefElts2, APInt &UndefElts3,

395 SimplifyAndSetOp) const {

396 return TTIImpl->simplifyDemandedVectorEltsIntrinsic(

397 IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,

398 SimplifyAndSetOp);

399}

400

404 return TTIImpl->getUnrollingPreferences(L, SE, UP, ORE);

405}

406

409 return TTIImpl->getPeelingPreferences(L, SE, PP);

410}

411

413 return TTIImpl->isLegalAddImmediate(Imm);

414}

415

417 return TTIImpl->isLegalAddScalableImmediate(Imm);

418}

419

421 return TTIImpl->isLegalICmpImmediate(Imm);

422}

423

425 int64_t BaseOffset,

426 bool HasBaseReg, int64_t Scale,

427 unsigned AddrSpace,

429 int64_t ScalableOffset) const {

430 return TTIImpl->isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,

431 Scale, AddrSpace, I, ScalableOffset);

432}

433

435 const LSRCost &C2) const {

436 return TTIImpl->isLSRCostLess(C1, C2);

437}

438

440 return TTIImpl->isNumRegsMajorCostOfLSR();

441}

442

444 return TTIImpl->shouldDropLSRSolutionIfLessProfitable();

445}

446

448 return TTIImpl->isProfitableLSRChainElement(I);

449}

450

452 return TTIImpl->canMacroFuseCmp();

453}

454

459 return TTIImpl->canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);

460}

461

465 return TTIImpl->getPreferredAddressingMode(L, SE);

466}

467

471 return TTIImpl->isLegalMaskedStore(DataType, Alignment, AddressSpace,

473}

474

478 return TTIImpl->isLegalMaskedLoad(DataType, Alignment, AddressSpace,

480}

481

483 Align Alignment) const {

484 return TTIImpl->isLegalNTStore(DataType, Alignment);

485}

486

488 return TTIImpl->isLegalNTLoad(DataType, Alignment);

489}

490

493 return TTIImpl->isLegalBroadcastLoad(ElementTy, NumElements);

494}

495

497 Align Alignment) const {

498 return TTIImpl->isLegalMaskedGather(DataType, Alignment);

499}

500

502 VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,

504 return TTIImpl->isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask);

505}

506

508 Align Alignment) const {

509 return TTIImpl->isLegalMaskedScatter(DataType, Alignment);

510}

511

513 Align Alignment) const {

514 return TTIImpl->forceScalarizeMaskedGather(DataType, Alignment);

515}

516

518 Align Alignment) const {

519 return TTIImpl->forceScalarizeMaskedScatter(DataType, Alignment);

520}

521

523 Align Alignment) const {

524 return TTIImpl->isLegalMaskedCompressStore(DataType, Alignment);

525}

526

528 Align Alignment) const {

529 return TTIImpl->isLegalMaskedExpandLoad(DataType, Alignment);

530}

531

533 Align Alignment) const {

534 return TTIImpl->isLegalStridedLoadStore(DataType, Alignment);

535}

536

539 unsigned AddrSpace) const {

540 return TTIImpl->isLegalInterleavedAccessType(VTy, Factor, Alignment,

541 AddrSpace);

542}

543

545 Type *DataType) const {

546 return TTIImpl->isLegalMaskedVectorHistogram(AddrType, DataType);

547}

548

550 return TTIImpl->enableOrderedReductions();

551}

552

554 return TTIImpl->hasDivRemOp(DataType, IsSigned);

555}

556

558 unsigned AddrSpace) const {

559 return TTIImpl->hasVolatileVariant(I, AddrSpace);

560}

561

563 return TTIImpl->prefersVectorizedAddressing();

564}

565

568 int64_t Scale, unsigned AddrSpace) const {

570 Ty, BaseGV, BaseOffset, HasBaseReg, Scale, AddrSpace);

571 assert(Cost >= 0 && "TTI should not produce negative costs!");

573}

574

576 return TTIImpl->LSRWithInstrQueries();

577}

578

580 return TTIImpl->isTruncateFree(Ty1, Ty2);

581}

582

584 return TTIImpl->isProfitableToHoist(I);

585}

586

588

590 return TTIImpl->isTypeLegal(Ty);

591}

592

594 return TTIImpl->getRegUsageForType(Ty);

595}

596

598 return TTIImpl->shouldBuildLookupTables();

599}

600

603 return TTIImpl->shouldBuildLookupTablesForConstant(C);

604}

605

607 return TTIImpl->shouldBuildRelLookupTables();

608}

609

611 return TTIImpl->useColdCCForColdCall(F);

612}

613

615 return TTIImpl->useFastCCForInternalCall(F);

616}

617

620 return TTIImpl->isTargetIntrinsicTriviallyScalarizable(ID);

621}

622

625 return TTIImpl->isTargetIntrinsicWithScalarOpAtArg(ID, ScalarOpdIdx);

626}

627

630 return TTIImpl->isTargetIntrinsicWithOverloadTypeAtArg(ID, OpdIdx);

631}

632

635 return TTIImpl->isTargetIntrinsicWithStructReturnOverloadAtField(ID, RetIdx);

636}

637

639 VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,

642 return TTIImpl->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,

644}

645

648 return TTIImpl->getOperandsScalarizationOverhead(Tys, CostKind);

649}

650

652 return TTIImpl->supportsEfficientVectorElementLoadStore();

653}

654

656 return TTIImpl->supportsTailCalls();

657}

658

660 return TTIImpl->supportsTailCallFor(CB);

661}

662

664 bool LoopHasReductions) const {

665 return TTIImpl->enableAggressiveInterleaving(LoopHasReductions);

666}

667

670 return TTIImpl->enableMemCmpExpansion(OptSize, IsZeroCmp);

671}

672

674 return TTIImpl->enableSelectOptimize();

675}

676

679 return TTIImpl->shouldTreatInstructionLikeSelect(I);

680}

681

683 return TTIImpl->enableInterleavedAccessVectorization();

684}

685

687 return TTIImpl->enableMaskedInterleavedAccessVectorization();

688}

689

691 return TTIImpl->isFPVectorizationPotentiallyUnsafe();

692}

693

694bool

699 unsigned *Fast) const {

700 return TTIImpl->allowsMisalignedMemoryAccesses(Context, BitWidth,

702}

703

706 return TTIImpl->getPopcntSupport(IntTyWidthInBit);

707}

708

710 return TTIImpl->haveFastSqrt(Ty);

711}

712

715 return TTIImpl->isExpensiveToSpeculativelyExecute(I);

716}

717

719 return TTIImpl->isFCmpOrdCheaperThanFCmpZero(Ty);

720}

721

724 assert(Cost >= 0 && "TTI should not produce negative costs!");

726}

727

729 unsigned Idx,

730 const APInt &Imm,

731 Type *Ty) const {

732 InstructionCost Cost = TTIImpl->getIntImmCodeSizeCost(Opcode, Idx, Imm, Ty);

733 assert(Cost >= 0 && "TTI should not produce negative costs!");

735}

736

741 assert(Cost >= 0 && "TTI should not produce negative costs!");

743}

744

746 unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty,

749 TTIImpl->getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);

750 assert(Cost >= 0 && "TTI should not produce negative costs!");

752}

753

759 TTIImpl->getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);

760 assert(Cost >= 0 && "TTI should not produce negative costs!");

762}

763

766 return TTIImpl->preferToKeepConstantsAttached(Inst, Fn);

767}

768

770 return TTIImpl->getNumberOfRegisters(ClassID);

771}

772

774 bool IsStore) const {

775 return TTIImpl->hasConditionalLoadStoreForType(Ty, IsStore);

776}

777

779 Type *Ty) const {

780 return TTIImpl->getRegisterClassForType(Vector, Ty);

781}

782

784 return TTIImpl->getRegisterClassName(ClassID);

785}

786

789 return TTIImpl->getRegisterBitWidth(K);

790}

791

793 return TTIImpl->getMinVectorRegisterBitWidth();

794}

795

797 return TTIImpl->getMaxVScale();

798}

799

801 return TTIImpl->getVScaleForTuning();

802}

803

805 return TTIImpl->isVScaleKnownToBeAPowerOfTwo();

806}

807

810 return TTIImpl->shouldMaximizeVectorBandwidth(K);

811}

812

814 bool IsScalable) const {

815 return TTIImpl->getMinimumVF(ElemWidth, IsScalable);

816}

817

819 unsigned Opcode) const {

820 return TTIImpl->getMaximumVF(ElemWidth, Opcode);

821}

822

824 Type *ScalarValTy) const {

825 return TTIImpl->getStoreMinimumVF(VF, ScalarMemTy, ScalarValTy);

826}

827

829 const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {

830 return TTIImpl->shouldConsiderAddressTypePromotion(

831 I, AllowPromotionWithoutCommonHeader);

832}

833

836 : TTIImpl->getCacheLineSize();

837}

838

839std::optional

841 return TTIImpl->getCacheSize(Level);

842}

843

844std::optional

846 return TTIImpl->getCacheAssociativity(Level);

847}

848

851 : TTIImpl->getMinPageSize();

852}

853

855 return TTIImpl->getPrefetchDistance();

856}

857

859 unsigned NumMemAccesses, unsigned NumStridedMemAccesses,

860 unsigned NumPrefetches, bool HasCall) const {

861 return TTIImpl->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,

862 NumPrefetches, HasCall);

863}

864

866 return TTIImpl->getMaxPrefetchIterationsAhead();

867}

868

870 return TTIImpl->enableWritePrefetching();

871}

872

874 return TTIImpl->shouldPrefetchAddressSpace(AS);

875}

876

878 unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType,

882 return TTIImpl->getPartialReductionCost(Opcode, InputTypeA, InputTypeB,

883 AccumType, VF, OpAExtend, OpBExtend,

885}

886

888 return TTIImpl->getMaxInterleaveFactor(VF);

889}

890

895

896

899

902 if (CI->getValue().isPowerOf2())

904 else if (CI->getValue().isNegatedPowerOf2())

906 }

908 }

909

910

911

912

914 if (ShuffleInst->isZeroEltSplat())

916

918

919

920

922

923

929 if (CI->getValue().isPowerOf2())

931 else if (CI->getValue().isNegatedPowerOf2())

933 }

934 }

937 bool AllPow2 = true, AllNegPow2 = true;

938 for (uint64_t I = 0, E = CDS->getNumElements(); I != E; ++I) {

940 AllPow2 &= CI->getValue().isPowerOf2();

941 AllNegPow2 &= CI->getValue().isNegatedPowerOf2();

942 if (AllPow2 || AllNegPow2)

943 continue;

944 }

945 AllPow2 = AllNegPow2 = false;

946 break;

947 }

948 OpProps = AllPow2 ? OP_PowerOf2 : OpProps;

952 }

953

954 return {OpInfo, OpProps};

955}

956

962

963

964

965

966 if (TLibInfo && Opcode == Instruction::FRem) {

968 LibFunc Func;

969 if (VecTy &&

970 TLibInfo->getLibFunc(Instruction::FRem, Ty->getScalarType(), Func) &&

974 }

975

977 TTIImpl->getArithmeticInstrCost(Opcode, Ty, CostKind,

978 Op1Info, Op2Info,

979 Args, CxtI);

980 assert(Cost >= 0 && "TTI should not produce negative costs!");

982}

983

985 VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,

988 TTIImpl->getAltInstrCost(VecTy, Opcode0, Opcode1, OpcodeMask, CostKind);

989 assert(Cost >= 0 && "TTI should not produce negative costs!");

991}

992

999 "Expected the Mask to match the return size if given");

1001 "Expected the same scalar types");

1003 Kind, DstTy, SrcTy, Mask, CostKind, Index, SubTp, Args, CxtI);

1004 assert(Cost >= 0 && "TTI should not produce negative costs!");

1005 return Cost;

1006}

1007

1014

1018 switch (CastOpc) {

1019 case Instruction::CastOps::ZExt:

1021 case Instruction::CastOps::SExt:

1023 default:

1025 }

1027}

1028

1031 if (I)

1033

1034 auto getLoadStoreKind = [](const Value *V, unsigned LdStOp, unsigned MaskedOp,

1035 unsigned GatScatOp) {

1037 if (I)

1039

1040 if (I->getOpcode() == LdStOp)

1042

1044 if (II->getIntrinsicID() == MaskedOp)

1046 if (II->getIntrinsicID() == GatScatOp)

1048 }

1049

1051 };

1052

1053 switch (I->getOpcode()) {

1054 case Instruction::ZExt:

1055 case Instruction::SExt:

1056 case Instruction::FPExt:

1057 return getLoadStoreKind(I->getOperand(0), Instruction::Load,

1058 Intrinsic::masked_load, Intrinsic::masked_gather);

1059 case Instruction::Trunc:

1060 case Instruction::FPTrunc:

1061 if (I->hasOneUse())

1062 return getLoadStoreKind(*I->user_begin(), Instruction::Store,

1063 Intrinsic::masked_store,

1064 Intrinsic::masked_scatter);

1065 break;

1066 default:

1068 }

1069

1071}

1072

1076 assert((I == nullptr || I->getOpcode() == Opcode) &&

1077 "Opcode should reflect passed instruction.");

1079 TTIImpl->getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);

1080 assert(Cost >= 0 && "TTI should not produce negative costs!");

1081 return Cost;

1082}

1083

1085 unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index,

1088 TTIImpl->getExtractWithExtendCost(Opcode, Dst, VecTy, Index, CostKind);

1089 assert(Cost >= 0 && "TTI should not produce negative costs!");

1090 return Cost;

1091}

1092

1095 assert((I == nullptr || I->getOpcode() == Opcode) &&

1096 "Opcode should reflect passed instruction.");

1098 assert(Cost >= 0 && "TTI should not produce negative costs!");

1099 return Cost;

1100}

1101

1106 assert((I == nullptr || I->getOpcode() == Opcode) &&

1107 "Opcode should reflect passed instruction.");

1109 Opcode, ValTy, CondTy, VecPred, CostKind, Op1Info, Op2Info, I);

1110 assert(Cost >= 0 && "TTI should not produce negative costs!");

1111 return Cost;

1112}

1113

1116 const Value *Op0, const Value *Op1) const {

1117 assert((Opcode == Instruction::InsertElement ||

1118 Opcode == Instruction::ExtractElement) &&

1119 "Expecting Opcode to be insertelement/extractelement.");

1121 TTIImpl->getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1);

1122 assert(Cost >= 0 && "TTI should not produce negative costs!");

1123 return Cost;

1124}

1125

1129 ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx) const {

1130 assert((Opcode == Instruction::InsertElement ||

1131 Opcode == Instruction::ExtractElement) &&

1132 "Expecting Opcode to be insertelement/extractelement.");

1134 Opcode, Val, CostKind, Index, Scalar, ScalarUserAndIdx);

1135 assert(Cost >= 0 && "TTI should not produce negative costs!");

1136 return Cost;

1137}

1138

1142 unsigned Index) const {

1143

1144

1145

1147 assert(Cost >= 0 && "TTI should not produce negative costs!");

1148 return Cost;

1149}

1150

1153 unsigned Index) const {

1155 TTIImpl->getIndexedVectorInstrCostFromEnd(Opcode, Val, CostKind, Index);

1156 assert(Cost >= 0 && "TTI should not produce negative costs!");

1157 return Cost;

1158}

1159

1162 assert((Opcode == Instruction::InsertValue ||

1163 Opcode == Instruction::ExtractValue) &&

1164 "Expecting Opcode to be insertvalue/extractvalue.");

1166 assert(Cost >= 0 && "TTI should not produce negative costs!");

1167 return Cost;

1168}

1169

1171 Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts,

1174 EltTy, ReplicationFactor, VF, DemandedDstElts, CostKind);

1175 assert(Cost >= 0 && "TTI should not produce negative costs!");

1176 return Cost;

1177}

1178

1183 assert((I == nullptr || I->getOpcode() == Opcode) &&

1184 "Opcode should reflect passed instruction.");

1187 assert(Cost >= 0 && "TTI should not produce negative costs!");

1188 return Cost;

1189}

1190

1194 bool UseMaskForCond, bool UseMaskForGaps) const {

1197 UseMaskForCond, UseMaskForGaps);

1198 assert(Cost >= 0 && "TTI should not produce negative costs!");

1199 return Cost;

1200}

1201

1206 assert(Cost >= 0 && "TTI should not produce negative costs!");

1207 return Cost;

1208}

1209

1214 assert(Cost >= 0 && "TTI should not produce negative costs!");

1215 return Cost;

1216}

1217

1223 assert(Cost >= 0 && "TTI should not produce negative costs!");

1224 return Cost;

1225}

1226

1228 return TTIImpl->getNumberOfParts(Tp);

1229}

1230

1235 TTIImpl->getAddressComputationCost(PtrTy, SE, Ptr, CostKind);

1236 assert(Cost >= 0 && "TTI should not produce negative costs!");

1237 return Cost;

1238}

1239

1242 assert(Cost >= 0 && "TTI should not produce negative costs!");

1243 return Cost;

1244}

1245

1247 return TTIImpl->getMaxMemIntrinsicInlineSizeThreshold();

1248}

1249

1251 unsigned Opcode, VectorType *Ty, std::optional FMF,

1254 TTIImpl->getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);

1255 assert(Cost >= 0 && "TTI should not produce negative costs!");

1256 return Cost;

1257}

1258

1263 TTIImpl->getMinMaxReductionCost(IID, Ty, FMF, CostKind);

1264 assert(Cost >= 0 && "TTI should not produce negative costs!");

1265 return Cost;

1266}

1267

1269 unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty,

1271 return TTIImpl->getExtendedReductionCost(Opcode, IsUnsigned, ResTy, Ty, FMF,

1273}

1274

1276 bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *Ty,

1278 return TTIImpl->getMulAccReductionCost(IsUnsigned, RedOpcode, ResTy, Ty,

1280}

1281

1284 return TTIImpl->getCostOfKeepingLiveOverCall(Tys);

1285}

1286

1289 return TTIImpl->getTgtMemIntrinsic(Inst, Info);

1290}

1291

1293 return TTIImpl->getAtomicMemIntrinsicMaxElementSize();

1294}

1295

1297 IntrinsicInst *Inst, Type *ExpectedType, bool CanCreate) const {

1298 return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType,

1299 CanCreate);

1300}

1301

1304 unsigned DestAddrSpace, Align SrcAlign, Align DestAlign,

1305 std::optional<uint32_t> AtomicElementSize) const {

1306 return TTIImpl->getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace,

1307 DestAddrSpace, SrcAlign, DestAlign,

1308 AtomicElementSize);

1309}

1310

1313 unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,

1315 std::optional<uint32_t> AtomicCpySize) const {

1316 TTIImpl->getMemcpyLoopResidualLoweringType(

1317 OpsOut, Context, RemainingBytes, SrcAddrSpace, DestAddrSpace, SrcAlign,

1318 DestAlign, AtomicCpySize);

1319}

1320

1322 const Function *Callee) const {

1323 return TTIImpl->areInlineCompatible(Caller, Callee);

1324}

1325

1326unsigned

1329 unsigned DefaultCallPenalty) const {

1330 return TTIImpl->getInlineCallPenalty(F, Call, DefaultCallPenalty);

1331}

1332

1336 return TTIImpl->areTypesABICompatible(Caller, Callee, Types);

1337}

1338

1340 Type *Ty) const {

1341 return TTIImpl->isIndexedLoadLegal(Mode, Ty);

1342}

1343

1345 Type *Ty) const {

1346 return TTIImpl->isIndexedStoreLegal(Mode, Ty);

1347}

1348

1350 return TTIImpl->getLoadStoreVecRegBitWidth(AS);

1351}

1352

1354 return TTIImpl->isLegalToVectorizeLoad(LI);

1355}

1356

1358 return TTIImpl->isLegalToVectorizeStore(SI);

1359}

1360

1362 unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {

1363 return TTIImpl->isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,

1364 AddrSpace);

1365}

1366

1368 unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {

1369 return TTIImpl->isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,

1370 AddrSpace);

1371}

1372

1375 return TTIImpl->isLegalToVectorizeReduction(RdxDesc, VF);

1376}

1377

1379 return TTIImpl->isElementTypeLegalForScalableVector(Ty);

1380}

1381

1383 unsigned LoadSize,

1384 unsigned ChainSizeInBytes,

1386 return TTIImpl->getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);

1387}

1388

1390 unsigned StoreSize,

1391 unsigned ChainSizeInBytes,

1393 return TTIImpl->getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);

1394}

1395

1397 bool IsEpilogue) const {

1398 return TTIImpl->preferFixedOverScalableIfEqualCost(IsEpilogue);

1399}

1400

1402 Type *Ty) const {

1403 return TTIImpl->preferInLoopReduction(Kind, Ty);

1404}

1405

1407 return TTIImpl->preferAlternateOpcodeVectorization();

1408}

1409

1411 return TTIImpl->preferPredicatedReductionSelect();

1412}

1413

1415 return TTIImpl->preferEpilogueVectorization();

1416}

1417

1419 return TTIImpl->shouldConsiderVectorizationRegPressure();

1420}

1421

1424 return TTIImpl->getVPLegalizationStrategy(VPI);

1425}

1426

1428 return TTIImpl->hasArmWideBranch(Thumb);

1429}

1430

1432 return TTIImpl->getFeatureMask(F);

1433}

1434

1436 return TTIImpl->getPriorityMask(F);

1437}

1438

1440 return TTIImpl->isMultiversionedFunction(F);

1441}

1442

1444 return TTIImpl->getMaxNumArgs();

1445}

1446

1448 return TTIImpl->shouldExpandReduction(II);

1449}

1450

1454 return TTIImpl->getPreferredExpandedReductionShuffle(II);

1455}

1456

1458 return TTIImpl->getGISelRematGlobalCost();

1459}

1460

1462 return TTIImpl->getMinTripCountTailFoldingThreshold();

1463}

1464

1466 return TTIImpl->supportsScalableVectors();

1467}

1468

1470 return TTIImpl->enableScalableVectorization();

1471}

1472

1474 return TTIImpl->hasActiveVectorLength();

1475}

1476

1479 return TTIImpl->isProfitableToSinkOperands(I, OpsToSink);

1480}

1481

1483 return TTIImpl->isVectorShiftByScalarCheap(Ty);

1484}

1485

1486unsigned

1489 return TTIImpl->getNumBytesToPadGlobalArray(Size, ArrayType);

1490}

1491

1494 SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const {

1495 return TTIImpl->collectKernelLaunchBounds(F, LB);

1496}

1497

1499 return TTIImpl->allowVectorElementIndexingUsingGEP();

1500}

1501

1503

1505

1509

1512 assert(F.isIntrinsic() && "Should not request TTI for intrinsics");

1513 return TTICallback(F);

1514}

1515

1517

1519 return Result(F.getDataLayout());

1520}

1521

1522

1524 "Target Transform Information", false, true)

1526

1528

1531

1535

1538 TTI = TIRA.run(F, DummyFAM);

1539 return *TTI;

1540}

1541

static cl::opt< OutputCostKind > CostKind("cost-kind", cl::desc("Target cost kind"), cl::init(OutputCostKind::RecipThroughput), cl::values(clEnumValN(OutputCostKind::RecipThroughput, "throughput", "Reciprocal throughput"), clEnumValN(OutputCostKind::Latency, "latency", "Instruction latency"), clEnumValN(OutputCostKind::CodeSize, "code-size", "Code size"), clEnumValN(OutputCostKind::SizeAndLatency, "size-latency", "Code size and latency"), clEnumValN(OutputCostKind::All, "all", "Print all cost kinds")))

static cl::opt< bool > ForceNestedLoop("force-nested-hardware-loop", cl::Hidden, cl::init(false), cl::desc("Force allowance of nested hardware loops"))

static cl::opt< bool > ForceHardwareLoopPHI("force-hardware-loop-phi", cl::Hidden, cl::init(false), cl::desc("Force hardware loop counter to be updated through a phi"))

This file provides various utilities for inspecting and working with the control flow graph in LLVM I...

Module.h This file contains the declarations for the Module class.

uint64_t IntrinsicInst * II

#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)

This file defines the SmallVector class.

static SymbolRef::Type getType(const Symbol *Sym)

This file provides helpers for the implementation of a TargetTransformInfo-conforming class.

static cl::opt< unsigned > PredictableBranchThreshold("predictable-branch-threshold", cl::init(99), cl::Hidden, cl::desc("Use this to override the target's predictable branch threshold (%)."))

static cl::opt< bool > EnableReduxCost("costmodel-reduxcost", cl::init(false), cl::Hidden, cl::desc("Recognize reduction patterns."))

static cl::opt< unsigned > MinPageSize("min-page-size", cl::init(0), cl::Hidden, cl::desc("Use this to override the target's minimum page size."))

static cl::opt< unsigned > CacheLineSize("cache-line-size", cl::init(0), cl::Hidden, cl::desc("Use this to override the target cache line size when " "specified by the user."))

This pass exposes codegen information to IR-level passes.

Class for arbitrary precision integers.

an instruction to allocate memory on the stack

This class represents an incoming formal argument to a Function.

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...

Class to represent array types.

A cache of @llvm.assume calls within a function.

LLVM Basic Block Representation.

BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...

Conditional or Unconditional Branch instruction.

Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...

Function * getCalledFunction() const

Returns the function called, or null if this is an indirect function invocation or the function signa...

User::op_iterator arg_begin()

Return the iterator pointing to the beginning of the argument list.

User::op_iterator arg_end()

Return the iterator pointing to the end of the argument list.

Predicate

This enumeration lists the possible predicates for CmpInst subclasses.

This is an important base class in LLVM.

A parsed version of the target data layout string in and methods for querying it.

Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.

LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const

Return true if the (end of the) basic block BB dominates the use U.

Convenience struct for specifying and reasoning about fast-math flags.

Class to represent function types.

FunctionType * getFunctionType() const

Returns the FunctionType for me.

ImmutablePass class - This class is used to provide information that does not need to be run.

The core instruction combiner logic.

LLVM_ABI IntrinsicCostAttributes(Intrinsic::ID Id, const CallBase &CI, InstructionCost ScalarCost=InstructionCost::getInvalid(), bool TypeBasedOnly=false, TargetLibraryInfo const *LibInfo=nullptr)

Definition TargetTransformInfo.cpp:75

A wrapper class for inspecting calls to intrinsic functions.

This is an important class for using LLVM in a threaded context.

An instruction for reading from memory.

Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...

void perform(const LoopInfo *LI)

Traverse the loop blocks and store the DFS result.

LoopT * getLoopFor(const BlockT *BB) const

Return the inner most loop that BB lives in.

Represents a single loop in the control flow graph.

Information for memory intrinsic cost model.

Analysis providing profile information.

The RecurrenceDescriptor is used to identify recurrences variables in a loop.

This class represents a constant integer value.

This class represents an analyzed expression in the program.

The main scalar evolution driver.

LLVM_ABI uint64_t getTypeSizeInBits(Type *Ty) const

Return the size in bits of the specified type, for which isSCEVable must return true.

LLVM_ABI bool isLoopInvariant(const SCEV *S, const Loop *L)

Return true if the value of the given SCEV is unchanging in the specified loop.

LLVM_ABI const SCEV * getExitCount(const Loop *L, const BasicBlock *ExitingBlock, ExitCountKind Kind=Exact)

Return the number of times the backedge executes before the given exit would be taken; if not exactly...

This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

StackOffset holds a fixed and a scalable offset in bytes.

An instruction for storing to memory.

Analysis pass providing the TargetTransformInfo.

LLVM_ABI Result run(const Function &F, FunctionAnalysisManager &)

Definition TargetTransformInfo.cpp:1510
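
As a usage note (not part of this file): a new-pass-manager pass typically obtains the TargetTransformInfo result through this analysis. A minimal sketch, with the pass name MyPass invented for illustration:

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/PassManager.h"
using namespace llvm;

struct MyPass : PassInfoMixin<MyPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM) {
    // TargetIRAnalysis::run builds the target-specific TTI for this function.
    const TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
    if (TTI.shouldBuildLookupTables()) {
      // ... drive a transform using the cost queries below ...
    }
    return PreservedAnalyses::all();
  }
};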

TargetTransformInfo Result

LLVM_ABI TargetIRAnalysis()

Default construct a target IR analysis.

Definition TargetTransformInfo.cpp:1504

Provides information about what library functions are available for the current target.

bool getLibFunc(StringRef funcName, LibFunc &F) const

Searches for a particular function name.

StringRef getName(LibFunc F) const

bool isFunctionVectorizable(StringRef F, const ElementCount &VF) const

virtual ~TargetTransformInfoImplBase()

CRTP base class for use as a mix-in that aids implementing a TargetTransformInfo-compatible class.

Wrapper pass for TargetTransformInfo.

TargetTransformInfoWrapperPass()

We must provide a default constructor for the pass but it should never be used.

Definition TargetTransformInfo.cpp:1529

TargetTransformInfo & getTTI(const Function &F)

Definition TargetTransformInfo.cpp:1536

This pass provides access to the codegen interfaces that are needed for IR-level transformations.

LLVM_ABI bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const

Definition TargetTransformInfo.cpp:1287

LLVM_ABI Value * getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst, Type *ExpectedType, bool CanCreate=true) const

Definition TargetTransformInfo.cpp:1296

LLVM_ABI bool isLegalToVectorizeLoad(LoadInst *LI) const

Definition TargetTransformInfo.cpp:1353

LLVM_ABI std::optional< unsigned > getVScaleForTuning() const

Definition TargetTransformInfo.cpp:800

static LLVM_ABI CastContextHint getCastContextHint(const Instruction *I)

Calculates a CastContextHint from I.

Definition TargetTransformInfo.cpp:1030
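
A hedged sketch of how a caller combines this hint with getCastInstrCost (the helper name costZExt is made up for illustration):

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

InstructionCost costZExt(const TargetTransformInfo &TTI, CastInst *Z) {
  // The hint tells the target e.g. that the extended value comes straight
  // from a load, which many targets fold into the memory operation.
  TargetTransformInfo::CastContextHint CCH =
      TargetTransformInfo::getCastContextHint(Z);
  return TTI.getCastInstrCost(Z->getOpcode(), Z->getDestTy(), Z->getSrcTy(),
                              CCH, TargetTransformInfo::TCK_RecipThroughput, Z);
}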

LLVM_ABI unsigned getMaxNumArgs() const

Definition TargetTransformInfo.cpp:1443

LLVM_ABI bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const

Return false if an AS0 address cannot possibly alias an AS1 address.

Definition TargetTransformInfo.cpp:312

LLVM_ABI bool isLegalMaskedScatter(Type *DataType, Align Alignment) const

Return true if the target supports masked scatter.

Definition TargetTransformInfo.cpp:507

LLVM_ABI bool shouldBuildLookupTables() const

Return true if switches should be turned into lookup tables for the target.

Definition TargetTransformInfo.cpp:597

LLVM_ABI bool isLegalToVectorizeStore(StoreInst *SI) const

Definition TargetTransformInfo.cpp:1357

LLVM_ABI InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index=-1, const Value *Op0=nullptr, const Value *Op1=nullptr) const

Definition TargetTransformInfo.cpp:1114

LLVM_ABI InstructionCost getMulAccReductionCost(bool IsUnsigned, unsigned RedOpcode, Type *ResTy, VectorType *Ty, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const

Calculate the cost of an extended reduction pattern, similar to getArithmeticReductionCost of an Add/...

Definition TargetTransformInfo.cpp:1275

LLVM_ABI bool areTypesABICompatible(const Function *Caller, const Function *Callee, ArrayRef< Type * > Types) const

Definition TargetTransformInfo.cpp:1333

LLVM_ABI bool enableAggressiveInterleaving(bool LoopHasReductions) const

Don't restrict interleaved unrolling to small loops.

Definition TargetTransformInfo.cpp:663

LLVM_ABI InstructionCost getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract, TTI::TargetCostKind CostKind, bool ForPoisonSrc=true, ArrayRef< Value * > VL={}) const

Estimate the overhead of scalarizing an instruction.

Definition TargetTransformInfo.cpp:638
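
For example (an illustrative sketch, not taken from this file), the overhead of building a <4 x i32> value one lane at a time could be queried as:

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

InstructionCost insertOverhead(const TargetTransformInfo &TTI, LLVMContext &Ctx) {
  auto *VTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);
  APInt Demanded = APInt::getAllOnes(4); // all four lanes are needed
  return TTI.getScalarizationOverhead(VTy, Demanded, /*Insert=*/true,
                                      /*Extract=*/false,
                                      TargetTransformInfo::TCK_RecipThroughput);
}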

LLVM_ABI bool isMultiversionedFunction(const Function &F) const

Returns true if this is an instance of a function with multiple versions.

Definition TargetTransformInfo.cpp:1439

LLVM_ABI InstructionUniformity getInstructionUniformity(const Value *V) const

Get target-specific uniformity information for an instruction.

Definition TargetTransformInfo.cpp:298

LLVM_ABI bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const

Return true if it is faster to check if a floating-point value is NaN (or not-NaN) versus a compariso...

Definition TargetTransformInfo.cpp:718

LLVM_ABI bool isLegalMaskedStore(Type *DataType, Align Alignment, unsigned AddressSpace, MaskKind MaskKind=VariableOrConstantMask) const

Return true if the target supports masked store.

Definition TargetTransformInfo.cpp:468

LLVM_ABI bool supportsEfficientVectorElementLoadStore() const

If target has efficient vector element load/store instructions, it can return true here so that inser...

Definition TargetTransformInfo.cpp:651

LLVM_ABI unsigned getAssumedAddrSpace(const Value *V) const

Definition TargetTransformInfo.cpp:336

LLVM_ABI bool preferAlternateOpcodeVectorization() const

Definition TargetTransformInfo.cpp:1406

LLVM_ABI bool shouldDropLSRSolutionIfLessProfitable() const

Return true if LSR should drop a found solution if it's calculated to be less profitable than the bas...

Definition TargetTransformInfo.cpp:443

LLVM_ABI bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1, const TargetTransformInfo::LSRCost &C2) const

Return true if LSR cost of C1 is lower than C2.

Definition TargetTransformInfo.cpp:434

LLVM_ABI unsigned getPrefetchDistance() const

Definition TargetTransformInfo.cpp:854

LLVM_ABI Type * getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicElementSize=std::nullopt) const

Definition TargetTransformInfo.cpp:1302

LLVM_ABI bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) const

Return true if the target supports masked expand load.

Definition TargetTransformInfo.cpp:527

LLVM_ABI bool prefersVectorizedAddressing() const

Return true if target doesn't mind addresses in vectors.

Definition TargetTransformInfo.cpp:562

LLVM_ABI InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, OperandValueInfo Op1Info={OK_AnyValue, OP_None}, OperandValueInfo Op2Info={OK_AnyValue, OP_None}, const Instruction *I=nullptr) const

Definition TargetTransformInfo.cpp:1102

LLVM_ABI bool hasBranchDivergence(const Function *F=nullptr) const

Return true if branch divergence exists.

Definition TargetTransformInfo.cpp:293

LLVM_ABI MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const

Definition TargetTransformInfo.cpp:669
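
A sketch of the intended use, assuming the returned options struct converts to false when expansion is disabled (helper name canExpandMemCmpEq invented):

#include "llvm/Analysis/TargetTransformInfo.h"
using namespace llvm;

bool canExpandMemCmpEq(const TargetTransformInfo &TTI, bool OptSize) {
  // IsZeroCmp selects the cheaper "equality only" expansion variant.
  TargetTransformInfo::MemCmpExpansionOptions Options =
      TTI.enableMemCmpExpansion(OptSize, /*IsZeroCmp=*/true);
  return Options && Options.MaxNumLoads > 0;
}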

LLVM_ABI void getUnrollingPreferences(Loop *L, ScalarEvolution &, UnrollingPreferences &UP, OptimizationRemarkEmitter *ORE) const

Get target-customized preferences for the generic loop unrolling transformation.

Definition TargetTransformInfo.cpp:401

LLVM_ABI bool shouldBuildLookupTablesForConstant(Constant *C) const

Return true if switches should be turned into lookup tables containing this constant value for the ta...

Definition TargetTransformInfo.cpp:601

LLVM_ABI bool supportsTailCallFor(const CallBase *CB) const

If target supports tail call on CB.

Definition TargetTransformInfo.cpp:659

LLVM_ABI std::optional< Instruction * > instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const

Targets can implement their own combinations for target-specific intrinsics.

Definition TargetTransformInfo.cpp:379

LLVM_ABI bool isProfitableLSRChainElement(Instruction *I) const

Definition TargetTransformInfo.cpp:447

LLVM_ABI TypeSize getRegisterBitWidth(RegisterKind K) const

Definition TargetTransformInfo.cpp:787

MaskKind

Some targets only support masked load/store with a constant mask.

LLVM_ABI unsigned getInlineCallPenalty(const Function *F, const CallBase &Call, unsigned DefaultCallPenalty) const

Returns a penalty for invoking call Call in F.

Definition TargetTransformInfo.cpp:1327

LLVM_ABI bool hasActiveVectorLength() const

Definition TargetTransformInfo.cpp:1473

LLVM_ABI bool isExpensiveToSpeculativelyExecute(const Instruction *I) const

Return true if the cost of the instruction is too high to speculatively execute and should be kept be...

Definition TargetTransformInfo.cpp:713

LLVM_ABI bool preferFixedOverScalableIfEqualCost(bool IsEpilogue) const

Definition TargetTransformInfo.cpp:1396

LLVM_ABI bool isLegalMaskedGather(Type *DataType, Align Alignment) const

Return true if the target supports masked gather.

Definition TargetTransformInfo.cpp:496

LLVM_ABI InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, OperandValueInfo OpdInfo={OK_AnyValue, OP_None}, const Instruction *I=nullptr) const

Definition TargetTransformInfo.cpp:1179

LLVM_ABI std::optional< unsigned > getMaxVScale() const

Definition TargetTransformInfo.cpp:796

LLVM_ABI InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts, TTI::TargetCostKind CostKind) const

Definition TargetTransformInfo.cpp:1170

LLVM_ABI bool allowVectorElementIndexingUsingGEP() const

Returns true if GEP should not be used to index into vectors for this target.

Definition TargetTransformInfo.cpp:1498

LLVM_ABI InstructionCost getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef< unsigned > Indices, Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, bool UseMaskForCond=false, bool UseMaskForGaps=false) const

Definition TargetTransformInfo.cpp:1191

LLVM_ABI bool isSingleThreaded() const

Definition TargetTransformInfo.cpp:340

LLVM_ABI std::optional< Value * > simplifyDemandedVectorEltsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) const

Can be used to implement target-specific instruction combining.

Definition TargetTransformInfo.cpp:391

LLVM_ABI bool enableOrderedReductions() const

Return true if we should be enabling ordered reductions for the target.

Definition TargetTransformInfo.cpp:549

LLVM_ABI unsigned getInliningCostBenefitAnalysisProfitableMultiplier() const

Definition TargetTransformInfo.cpp:229

LLVM_ABI InstructionCost getShuffleCost(ShuffleKind Kind, VectorType *DstTy, VectorType *SrcTy, ArrayRef< int > Mask={}, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, int Index=0, VectorType *SubTp=nullptr, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr) const

Definition TargetTransformInfo.cpp:993

LLVM_ABI InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) const

Definition TargetTransformInfo.cpp:1203

LLVM_ABI InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const

Calculate the cost of vector reduction intrinsics.

Definition TargetTransformInfo.cpp:1250
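
For instance, a vectorizer-style query for an unordered <4 x float> fadd reduction might look like this (illustrative sketch only):

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

InstructionCost faddReductionCost(const TargetTransformInfo &TTI, LLVMContext &Ctx) {
  auto *VTy = FixedVectorType::get(Type::getFloatTy(Ctx), 4);
  FastMathFlags FMF;
  FMF.setAllowReassoc(); // reassociation permits an unordered reduction tree
  return TTI.getArithmeticReductionCost(Instruction::FAdd, VTy, FMF,
                                        TargetTransformInfo::TCK_RecipThroughput);
}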

LLVM_ABI unsigned getAtomicMemIntrinsicMaxElementSize() const

Definition TargetTransformInfo.cpp:1292

LLVM_ABI InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const

Definition TargetTransformInfo.cpp:1073

LLVM_ABI bool LSRWithInstrQueries() const

Return true if the loop strength reduce pass should make Instruction* based TTI queries to isLegalAdd...

Definition TargetTransformInfo.cpp:575

LLVM_ABI unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize, unsigned ChainSizeInBytes, VectorType *VecTy) const

Definition TargetTransformInfo.cpp:1389

LLVM_ABI VPLegalization getVPLegalizationStrategy(const VPIntrinsic &PI) const

Definition TargetTransformInfo.cpp:1423

static LLVM_ABI PartialReductionExtendKind getPartialReductionExtendKind(Instruction *I)

Get the kind of extension that an instruction represents.

Definition TargetTransformInfo.cpp:1009

LLVM_ABI bool shouldConsiderVectorizationRegPressure() const

Definition TargetTransformInfo.cpp:1418

LLVM_ABI bool enableWritePrefetching() const

Definition TargetTransformInfo.cpp:869

LLVM_ABI bool shouldTreatInstructionLikeSelect(const Instruction *I) const

Should the Select Optimization pass treat the given instruction like a select, potentially converting...

Definition TargetTransformInfo.cpp:677

LLVM_ABI bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const

Definition TargetTransformInfo.cpp:326

LLVM_ABI bool shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const

Definition TargetTransformInfo.cpp:808

LLVM_ABI TailFoldingStyle getPreferredTailFoldingStyle(bool IVUpdateMayOverflow=true) const

Query the target what the preferred style of tail folding is.

Definition TargetTransformInfo.cpp:373

LLVM_ABI InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, ArrayRef< const Value * > Operands, Type *AccessType=nullptr, TargetCostKind CostKind=TCK_SizeAndLatency) const

Estimate the cost of a GEP operation when lowered.

Definition TargetTransformInfo.cpp:252
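
A minimal sketch of querying this for an existing GEP instruction (the helper name gepCost is invented for illustration):

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

InstructionCost gepCost(const TargetTransformInfo &TTI, GetElementPtrInst *GEP) {
  // Pass the index operands; the pointer operand goes in separately.
  SmallVector<const Value *, 4> Indices(GEP->idx_begin(), GEP->idx_end());
  return TTI.getGEPCost(GEP->getSourceElementType(), GEP->getPointerOperand(),
                        Indices, GEP->getResultElementType());
}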

LLVM_ABI bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const

Definition TargetTransformInfo.cpp:1367

LLVM_ABI bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor, Align Alignment, unsigned AddrSpace) const

Return true if the target supports interleaved access for the given vector type VTy,...

Definition TargetTransformInfo.cpp:537

LLVM_ABI unsigned getRegUsageForType(Type *Ty) const

Returns the estimated number of registers required to represent Ty.

Definition TargetTransformInfo.cpp:593

LLVM_ABI bool isLegalBroadcastLoad(Type *ElementTy, ElementCount NumElements) const

Returns true if the target supports broadcasting a load to a vector of type <NumElements x ElementTy...

Definition TargetTransformInfo.cpp:491

LLVM_ABI bool isIndexedStoreLegal(enum MemIndexedMode Mode, Type *Ty) const

Definition TargetTransformInfo.cpp:1344

LLVM_ABI std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const

Definition TargetTransformInfo.cpp:345

LLVM_ABI InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, std::optional< FastMathFlags > FMF, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const

Calculate the cost of an extended reduction pattern, similar to getArithmeticReductionCost of a reduc...

Definition TargetTransformInfo.cpp:1268

LLVM_ABI unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const

Definition TargetTransformInfo.cpp:1349

LLVM_ABI ReductionShuffle getPreferredExpandedReductionShuffle(const IntrinsicInst *II) const

Definition TargetTransformInfo.cpp:1452

static LLVM_ABI OperandValueInfo getOperandInfo(const Value *V)

Collect properties of V used in cost analysis, e.g. OP_PowerOf2.

Definition TargetTransformInfo.cpp:892
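
As a sketch of how cost queries use this (names invented for illustration): classify both operands first, then pass the results to getArithmeticInstrCost so the target can, for example, price a multiply by a power-of-two constant as a shift.

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

InstructionCost mulCost(const TargetTransformInfo &TTI, BinaryOperator *Mul) {
  TargetTransformInfo::OperandValueInfo Op0 =
      TargetTransformInfo::getOperandInfo(Mul->getOperand(0));
  TargetTransformInfo::OperandValueInfo Op1 =
      TargetTransformInfo::getOperandInfo(Mul->getOperand(1));
  return TTI.getArithmeticInstrCost(Instruction::Mul, Mul->getType(),
                                    TargetTransformInfo::TCK_RecipThroughput,
                                    Op0, Op1);
}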

LLVM_ABI unsigned getRegisterClassForType(bool Vector, Type *Ty=nullptr) const

Definition TargetTransformInfo.cpp:778

LLVM_ABI bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace=0, Instruction *I=nullptr, int64_t ScalableOffset=0) const

Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...

Definition TargetTransformInfo.cpp:424
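
For example (a sketch with made-up values), LSR-style code can ask whether "base register + 8 * index" addressing is supported for an i64 access:

#include "llvm/Analysis/TargetTransformInfo.h"
using namespace llvm;

bool hasScaledI64Addressing(const TargetTransformInfo &TTI, LLVMContext &Ctx) {
  return TTI.isLegalAddressingMode(Type::getInt64Ty(Ctx), /*BaseGV=*/nullptr,
                                   /*BaseOffset=*/0, /*HasBaseReg=*/true,
                                   /*Scale=*/8, /*AddrSpace=*/0);
}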

LLVM_ABI PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const

Return hardware support for population count.

Definition TargetTransformInfo.cpp:705

LLVM_ABI unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const

Definition TargetTransformInfo.cpp:267

LLVM_ABI bool isElementTypeLegalForScalableVector(Type *Ty) const

Definition TargetTransformInfo.cpp:1378

LLVM_ABI bool forceScalarizeMaskedGather(VectorType *Type, Align Alignment) const

Return true if the target forces scalarizing of llvm.masked.gather intrinsics.

Definition TargetTransformInfo.cpp:512

LLVM_ABI unsigned getMaxPrefetchIterationsAhead() const

Definition TargetTransformInfo.cpp:865

LLVM_ABI bool canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const

Return true if globals in this address space can have initializers other than undef.

Definition TargetTransformInfo.cpp:331

LLVM_ABI ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const

Definition TargetTransformInfo.cpp:813

LLVM_ABI InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm, Type *Ty, TargetCostKind CostKind) const

Definition TargetTransformInfo.cpp:755

LLVM_ABI bool enableMaskedInterleavedAccessVectorization() const

Enable matching of interleaved access groups that contain predicated accesses or gaps and therefore v...

Definition TargetTransformInfo.cpp:686

LLVM_ABI InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx, const APInt &Imm, Type *Ty, TargetCostKind CostKind, Instruction *Inst=nullptr) const

Return the expected cost of materialization for the given integer immediate of the specified type for...

Definition TargetTransformInfo.cpp:745

LLVM_ABI bool isLegalStridedLoadStore(Type *DataType, Align Alignment) const

Return true if the target supports strided load.

Definition TargetTransformInfo.cpp:532

LLVM_ABI TargetTransformInfo & operator=(TargetTransformInfo &&RHS)

Definition TargetTransformInfo.cpp:214

LLVM_ABI InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF=FastMathFlags(), TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const

Definition TargetTransformInfo.cpp:1259

TargetCostKind

The kind of cost model.

@ TCK_RecipThroughput

Reciprocal throughput.

LLVM_ABI InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr, const TargetLibraryInfo *TLibInfo=nullptr) const

This is an approximation of reciprocal throughput of a math/logic op.

Definition TargetTransformInfo.cpp:957

LLVM_ABI bool enableSelectOptimize() const

Should the Select Optimization pass be enabled and ran.

Definition TargetTransformInfo.cpp:673

LLVM_ABI bool collectFlatAddressOperands(SmallVectorImpl< int > &OpIndexes, Intrinsic::ID IID) const

Return any intrinsic address operand indexes which may be rewritten if they use a flat address space ...

Definition TargetTransformInfo.cpp:321

OperandValueProperties

Additional properties of an operand's values.

LLVM_ABI int getInliningLastCallToStaticBonus() const

Definition TargetTransformInfo.cpp:234

LLVM_ABI InstructionCost getPointersChainCost(ArrayRef< const Value * > Ptrs, const Value *Base, const PointersChainInfo &Info, Type *AccessTy, TargetCostKind CostKind=TTI::TCK_RecipThroughput) const

Estimate the cost of a chain of pointers (typically pointer operands of a chain of loads or stores wi...

Definition TargetTransformInfo.cpp:258

LLVM_ABI bool isVScaleKnownToBeAPowerOfTwo() const

Definition TargetTransformInfo.cpp:804

LLVM_ABI bool isIndexedLoadLegal(enum MemIndexedMode Mode, Type *Ty) const

Definition TargetTransformInfo.cpp:1339

LLVM_ABI unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const

Definition TargetTransformInfo.cpp:818

LLVM_ABI bool isLegalICmpImmediate(int64_t Imm) const

Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...

Definition TargetTransformInfo.cpp:420

LLVM_ABI bool isTypeLegal(Type *Ty) const

Return true if this type is legal.

Definition TargetTransformInfo.cpp:589

LLVM_ABI bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc, ElementCount VF) const

Definition TargetTransformInfo.cpp:1373

LLVM_ABI std::optional< unsigned > getCacheAssociativity(CacheLevel Level) const

Definition TargetTransformInfo.cpp:845

LLVM_ABI bool isLegalNTLoad(Type *DataType, Align Alignment) const

Return true if the target supports nontemporal load.

Definition TargetTransformInfo.cpp:487

LLVM_ABI InstructionCost getMemcpyCost(const Instruction *I) const

Definition TargetTransformInfo.cpp:1240

LLVM_ABI unsigned adjustInliningThreshold(const CallBase *CB) const

Definition TargetTransformInfo.cpp:239

LLVM_ABI bool isLegalAddImmediate(int64_t Imm) const

Return true if the specified immediate is legal add immediate, that is the target has add instruction...

Definition TargetTransformInfo.cpp:412

LLVM_ABI bool isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID, int RetIdx) const

Identifies if the vector form of the intrinsic that returns a struct is overloaded at the struct elem...

Definition TargetTransformInfo.cpp:633

LLVM_ABI unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize, unsigned ChainSizeInBytes, VectorType *VecTy) const

Definition TargetTransformInfo.cpp:1382

LLVM_ABI InstructionCost getMemIntrinsicInstrCost(const MemIntrinsicCostAttributes &MICA, TTI::TargetCostKind CostKind) const

Definition TargetTransformInfo.cpp:1210

LLVM_ABI bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC, TargetLibraryInfo *LibInfo) const

Return true if the target can save a compare for loop count, for example hardware loop saves a compar...

Definition TargetTransformInfo.cpp:455

LLVM_ABI bool isTargetIntrinsicTriviallyScalarizable(Intrinsic::ID ID) const

Definition TargetTransformInfo.cpp:618

LLVM_ABI Value * rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, Value *NewV) const

Rewrite intrinsic call II such that OldV will be replaced with NewV, which has a different address sp...

Definition TargetTransformInfo.cpp:349

LLVM_ABI InstructionCost getCostOfKeepingLiveOverCall(ArrayRef< Type * > Tys) const

Definition TargetTransformInfo.cpp:1283

LLVM_ABI unsigned getMinPrefetchStride(unsigned NumMemAccesses, unsigned NumStridedMemAccesses, unsigned NumPrefetches, bool HasCall) const

Some HW prefetchers can handle accesses up to a certain constant stride.

Definition TargetTransformInfo.cpp:858

LLVM_ABI bool shouldPrefetchAddressSpace(unsigned AS) const

Definition TargetTransformInfo.cpp:873

LLVM_ABI InstructionCost getIntImmCost(const APInt &Imm, Type *Ty, TargetCostKind CostKind) const

Return the expected cost of materializing for the given integer immediate of the specified type.

Definition TargetTransformInfo.cpp:738

LLVM_ABI unsigned getMinVectorRegisterBitWidth() const

Definition TargetTransformInfo.cpp:792

LLVM_ABI InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE, const SCEV *Ptr, TTI::TargetCostKind CostKind) const

Definition TargetTransformInfo.cpp:1231

LLVM_ABI bool isLegalNTStore(Type *DataType, Align Alignment) const

Return true if the target supports nontemporal store.

Definition TargetTransformInfo.cpp:482

LLVM_ABI InstructionCost getPartialReductionCost(unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType, ElementCount VF, PartialReductionExtendKind OpAExtend, PartialReductionExtendKind OpBExtend, std::optional< unsigned > BinOp, TTI::TargetCostKind CostKind) const

Definition TargetTransformInfo.cpp:877

LLVM_ABI unsigned getFlatAddressSpace() const

Returns the address space ID for a target's 'flat' address space.

Definition TargetTransformInfo.cpp:317

LLVM_ABI bool preferToKeepConstantsAttached(const Instruction &Inst, const Function &Fn) const

It can be advantageous to detach complex constants from their uses to make their generation cheaper.

Definition TargetTransformInfo.cpp:764

LLVM_ABI bool hasArmWideBranch(bool Thumb) const

Definition TargetTransformInfo.cpp:1427

LLVM_ABI const char * getRegisterClassName(unsigned ClassID) const

Definition TargetTransformInfo.cpp:783

LLVM_ABI bool preferEpilogueVectorization() const

Return true if the loop vectorizer should consider vectorizing an otherwise scalar epilogue loop.

Definition TargetTransformInfo.cpp:1414

LLVM_ABI bool shouldConsiderAddressTypePromotion(const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const

Definition TargetTransformInfo.cpp:828

LLVM_ABI bool useAA() const

Definition TargetTransformInfo.cpp:587

LLVM_ABI APInt getPriorityMask(const Function &F) const

Returns a bitmask constructed from the target-features or fmv-features metadata of a function corresp...

Definition TargetTransformInfo.cpp:1435

LLVM_ABI BranchProbability getPredictableBranchThreshold() const

If a branch or a select condition is skewed in one direction by more than this factor,...

Definition TargetTransformInfo.cpp:283
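
A hedged sketch of the usual comparison against profile data (Prob is an assumed branch probability obtained elsewhere, e.g. from BranchProbabilityInfo):

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/BranchProbability.h"
using namespace llvm;

bool isPredictableBranch(const TargetTransformInfo &TTI, BranchProbability Prob) {
  BranchProbability Threshold = TTI.getPredictableBranchThreshold();
  // Skewed strongly toward either the taken or the not-taken edge.
  return Prob > Threshold || Prob.getCompl() > Threshold;
}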

LLVM_ABI TargetTransformInfo(std::unique_ptr< const TargetTransformInfoImplBase > Impl)

Construct a TTI object using a type implementing the Concept API below.

Definition TargetTransformInfo.cpp:61

LLVM_ABI bool preferInLoopReduction(RecurKind Kind, Type *Ty) const

Definition TargetTransformInfo.cpp:1401

LLVM_ABI unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const

Definition TargetTransformInfo.cpp:243

LLVM_ABI bool hasConditionalLoadStoreForType(Type *Ty, bool IsStore) const

Definition TargetTransformInfo.cpp:773

LLVM_ABI unsigned getCacheLineSize() const

Definition TargetTransformInfo.cpp:834

LLVM_ABI bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, unsigned AddressSpace=0, Align Alignment=Align(1), unsigned *Fast=nullptr) const

Determine if the target supports unaligned memory accesses.

Definition TargetTransformInfo.cpp:695
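
A hedged sketch of the usual query pattern: check both that the unaligned access is allowed and that the target reports it as fast. The 128-bit width, address space 0, and helper name are illustrative.

// Hedged sketch: only emit a wide unaligned load if the target both allows it
// and considers it fast.
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

static bool canUseWideUnalignedLoad(const TargetTransformInfo &TTI,
                                    LLVMContext &Ctx) {
  unsigned Fast = 0;
  bool Allowed = TTI.allowsMisalignedMemoryAccesses(
      Ctx, /*BitWidth=*/128, /*AddressSpace=*/0, Align(1), &Fast);
  return Allowed && Fast != 0;
}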

LLVM_ABI int getInlinerVectorBonusPercent() const

Definition TargetTransformInfo.cpp:248

LLVM_ABI unsigned getEpilogueVectorizationMinVF() const

Definition TargetTransformInfo.cpp:364

LLVM_ABI void collectKernelLaunchBounds(const Function &F, SmallVectorImpl< std::pair< StringRef, int64_t > > &LB) const

Collect kernel launch bounds for F into LB.

Definition TargetTransformInfo.cpp:1492

PopcntSupportKind

Flags indicating the kind of support for population count.

LLVM_ABI bool preferPredicatedReductionSelect() const

Definition TargetTransformInfo.cpp:1410

LLVM_ABI InstructionCost getIntImmCodeSizeCost(unsigned Opc, unsigned Idx, const APInt &Imm, Type *Ty) const

Return the expected cost for the given integer when optimising for size.

Definition TargetTransformInfo.cpp:728

LLVM_ABI AddressingModeKind getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const

Return the preferred addressing mode LSR should make efforts to generate.

Definition TargetTransformInfo.cpp:463

LLVM_ABI bool isLoweredToCall(const Function *F) const

Test whether calls to a function lower to actual program function calls.

Definition TargetTransformInfo.cpp:354

LLVM_ABI bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const

Definition TargetTransformInfo.cpp:1361

LLVM_ABI bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, AssumptionCache &AC, TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const

Query the target whether it would be profitable to convert the given loop into a hardware loop.

Definition TargetTransformInfo.cpp:358
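
A hedged sketch of the two-step query a hardware-loop style pass might make, combining this hook with HardwareLoopInfo::isHardwareLoopCandidate (both declared in this header); the surrounding analyses are assumed to come from the pass.

// Hedged sketch: ask the target whether a hardware loop is worthwhile, then
// verify the loop's trip count is actually analyzable.
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"

using namespace llvm;

static bool shouldFormHardwareLoop(Loop *L, ScalarEvolution &SE,
                                   AssumptionCache &AC, LoopInfo &LI,
                                   DominatorTree &DT,
                                   TargetLibraryInfo *LibInfo,
                                   const TargetTransformInfo &TTI) {
  HardwareLoopInfo HWLoopInfo(L);
  return TTI.isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo) &&
         HWLoopInfo.isHardwareLoopCandidate(SE, LI, DT);
}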

LLVM_ABI unsigned getInliningThresholdMultiplier() const

Definition TargetTransformInfo.cpp:219

LLVM_ABI InstructionCost getBranchMispredictPenalty() const

Returns estimated penalty of a branch misprediction in latency.

Definition TargetTransformInfo.cpp:289

LLVM_ABI unsigned getNumberOfRegisters(unsigned ClassID) const

Definition TargetTransformInfo.cpp:769

LLVM_ABI bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, const SmallBitVector &OpcodeMask) const

Return true if this is an alternating opcode pattern that can be lowered to a single instruction on the target.

Definition TargetTransformInfo.cpp:501

LLVM_ABI bool isProfitableToHoist(Instruction *I) const

Return true if it is profitable to hoist an instruction from the then/else blocks to before the if.

Definition TargetTransformInfo.cpp:583

LLVM_ABI bool supportsScalableVectors() const

Definition TargetTransformInfo.cpp:1465

LLVM_ABI bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const

Return true if the given instruction (assumed to be a memory access instruction) has a volatile variant.

Definition TargetTransformInfo.cpp:557

LLVM_ABI bool isLegalMaskedCompressStore(Type *DataType, Align Alignment) const

Return true if the target supports masked compress store.

Definition TargetTransformInfo.cpp:522

LLVM_ABI std::optional< unsigned > getMinPageSize() const

Definition TargetTransformInfo.cpp:849

LLVM_ABI bool isFPVectorizationPotentiallyUnsafe() const

Indicate that it is potentially unsafe to automatically vectorize floating-point operations because the semantics of vector and scalar floating-point operations may differ.

Definition TargetTransformInfo.cpp:690

LLVM_ABI InstructionCost getInsertExtractValueCost(unsigned Opcode, TTI::TargetCostKind CostKind) const

Definition TargetTransformInfo.cpp:1160

LLVM_ABI bool shouldBuildRelLookupTables() const

Return true if lookup tables should be turned into relative lookup tables.

Definition TargetTransformInfo.cpp:606

PartialReductionExtendKind

LLVM_ABI unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy, Type *ScalarValTy) const

Definition TargetTransformInfo.cpp:823

LLVM_ABI std::optional< unsigned > getCacheSize(CacheLevel Level) const

Definition TargetTransformInfo.cpp:840

LLVM_ABI std::optional< Value * > simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed) const

Can be used to implement target-specific instruction combining.

Definition TargetTransformInfo.cpp:384

LLVM_ABI bool isLegalAddScalableImmediate(int64_t Imm) const

Return true if adding the specified scalable immediate is legal, that is, the target has add instructions which can add a register with the immediate (scaled by vscale) without having to materialize the immediate into a register.

Definition TargetTransformInfo.cpp:416

LLVM_ABI bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, unsigned ScalarOpdIdx) const

Identifies if the vector form of the intrinsic has a scalar operand.

Definition TargetTransformInfo.cpp:623

LLVM_ABI bool hasDivRemOp(Type *DataType, bool IsSigned) const

Return true if the target has a unified operation to calculate division and remainder.

Definition TargetTransformInfo.cpp:553

LLVM_ABI InstructionCost getAltInstrCost(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, const SmallBitVector &OpcodeMask, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput) const

Returns the cost estimate for an alternating opcode pattern that can be lowered to a single instruction on the target.

Definition TargetTransformInfo.cpp:984

LLVM_ABI bool enableInterleavedAccessVectorization() const

Enable matching of interleaved access groups.

Definition TargetTransformInfo.cpp:682

LLVM_ABI unsigned getMinTripCountTailFoldingThreshold() const

Definition TargetTransformInfo.cpp:1461

LLVM_ABI InstructionCost getInstructionCost(const User *U, ArrayRef< const Value * > Operands, TargetCostKind CostKind) const

Estimate the cost of a given IR user when lowered.

Definition TargetTransformInfo.cpp:274
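
A hedged sketch of a size heuristic built on this hook, similar in shape to what in-tree size estimators do; the helper name and the TCK_CodeSize choice are illustrative.

// Hedged sketch: sum the code-size cost of every instruction in a block.
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

static InstructionCost blockSizeCost(const BasicBlock &BB,
                                     const TargetTransformInfo &TTI) {
  InstructionCost Cost = 0;
  for (const Instruction &I : BB) {
    SmallVector<const Value *, 4> Operands(I.operand_values());
    Cost += TTI.getInstructionCost(&I, Operands,
                                   TargetTransformInfo::TCK_CodeSize);
  }
  return Cost;
}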

LLVM_ABI unsigned getMaxInterleaveFactor(ElementCount VF) const

Definition TargetTransformInfo.cpp:887

LLVM_ABI bool enableScalableVectorization() const

Definition TargetTransformInfo.cpp:1469

LLVM_ABI bool useFastCCForInternalCall(Function &F) const

Return true if the input function, which is internal, should use the fastcc calling convention.

Definition TargetTransformInfo.cpp:614

LLVM_ABI bool isVectorShiftByScalarCheap(Type *Ty) const

Return true if it's significantly cheaper to shift a vector by a uniform scalar than by an amount which varies across the vector's lanes.

Definition TargetTransformInfo.cpp:1482

LLVM_ABI bool isNumRegsMajorCostOfLSR() const

Return true if LSR's major cost is the number of registers.

Definition TargetTransformInfo.cpp:439

LLVM_ABI unsigned getInliningCostBenefitAnalysisSavingsMultiplier() const

Definition TargetTransformInfo.cpp:224

LLVM_ABI bool isLegalMaskedVectorHistogram(Type *AddrType, Type *DataType) const

Definition TargetTransformInfo.cpp:544

LLVM_ABI unsigned getGISelRematGlobalCost() const

Definition TargetTransformInfo.cpp:1457

LLVM_ABI unsigned getNumBytesToPadGlobalArray(unsigned Size, Type *ArrayType) const

Definition TargetTransformInfo.cpp:1487

MemIndexedMode

The type of load/store indexing.

LLVM_ABI bool isLegalMaskedLoad(Type *DataType, Align Alignment, unsigned AddressSpace, MaskKind MaskKind=VariableOrConstantMask) const

Return true if the target supports masked load.

Definition TargetTransformInfo.cpp:475
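
A hedged sketch of a vectorizer-style legality check using the signature as listed above (data type, alignment, address space); the helper and its decision are illustrative.

// Hedged sketch: only plan a masked vector load if the target declares it
// legal for this vector type, alignment and address space.
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

static bool canUseMaskedLoad(const LoadInst &LI, Type *VecTy,
                             const TargetTransformInfo &TTI) {
  return TTI.isLegalMaskedLoad(VecTy, LI.getAlign(),
                               LI.getPointerAddressSpace());
}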

LLVM_ABI InstructionCost getIndexedVectorInstrCostFromEnd(unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index) const

Definition TargetTransformInfo.cpp:1151

LLVM_ABI bool areInlineCompatible(const Function *Caller, const Function *Callee) const

Definition TargetTransformInfo.cpp:1321

LLVM_ABI bool useColdCCForColdCall(Function &F) const

Return true if the input function, which is cold at all call sites, should use the coldcc calling convention.

Definition TargetTransformInfo.cpp:610

LLVM_ABI InstructionCost getFPOpCost(Type *Ty) const

Return the expected cost of supporting the floating point operation of the specified type.

Definition TargetTransformInfo.cpp:722

LLVM_ABI bool supportsTailCalls() const

If the target supports tail calls.

Definition TargetTransformInfo.cpp:655

LLVM_ABI bool canMacroFuseCmp() const

Return true if the target can fuse a compare and branch.

Definition TargetTransformInfo.cpp:451

LLVM_ABI bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const

Query the target whether the specified address space cast from FromAS to ToAS is valid.

Definition TargetTransformInfo.cpp:307

LLVM_ABI unsigned getNumberOfParts(Type *Tp) const

Definition TargetTransformInfo.cpp:1227

LLVM_ABI InstructionCost getOperandsScalarizationOverhead(ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind) const

Estimate the overhead of scalarizing operands with the given types.

Definition TargetTransformInfo.cpp:646

AddressingModeKind

Which addressing mode Loop Strength Reduction will try to generate.

LLVM_ABI InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg, int64_t Scale, unsigned AddrSpace=0) const

Return the cost of the scaling factor used in the addressing mode represented by AM for this target, for a load/store of the specified type.

Definition TargetTransformInfo.cpp:566

LLVM_ABI bool isTruncateFree(Type *Ty1, Type *Ty2) const

Return true if it's free to truncate a value of type Ty1 to type Ty2.

Definition TargetTransformInfo.cpp:579

LLVM_ABI bool isProfitableToSinkOperands(Instruction *I, SmallVectorImpl< Use * > &Ops) const

Return true if sinking I's operands to the same basic block as I is profitable, e.g. because the operands can be folded into a target instruction during instruction selection.

Definition TargetTransformInfo.cpp:1477

LLVM_ABI void getMemcpyLoopResidualLoweringType(SmallVectorImpl< Type * > &OpsOut, LLVMContext &Context, unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, std::optional< uint32_t > AtomicCpySize=std::nullopt) const

Definition TargetTransformInfo.cpp:1311

LLVM_ABI bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const

Query the target whether it would be preferred to create a predicated vector loop, which can avoid the need for a scalar epilogue loop.

Definition TargetTransformInfo.cpp:368

LLVM_ABI bool forceScalarizeMaskedScatter(VectorType *Type, Align Alignment) const

Return true if the target forces scalarizing of llvm.masked.scatter intrinsics.

Definition TargetTransformInfo.cpp:517

LLVM_ABI bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx) const

Identifies if the vector form of the intrinsic is overloaded on the type of the operand at index OpdIdx, or on the return type if OpdIdx is -1.

Definition TargetTransformInfo.cpp:628

LLVM_ABI bool haveFastSqrt(Type *Ty) const

Return true if the hardware has a fast square-root instruction.

Definition TargetTransformInfo.cpp:709

LLVM_ABI bool shouldExpandReduction(const IntrinsicInst *II) const

Definition TargetTransformInfo.cpp:1447

LLVM_ABI uint64_t getMaxMemIntrinsicInlineSizeThreshold() const

Returns the maximum memset / memcpy size in bytes that still makes it profitable to inline the call.

Definition TargetTransformInfo.cpp:1246

ShuffleKind

The various kinds of shuffle patterns for vector queries.

LLVM_ABI APInt getFeatureMask(const Function &F) const

Returns a bitmask constructed from the target-features or fmv-features metadata of a function corresp...

Definition TargetTransformInfo.cpp:1431

LLVM_ABI void getPeelingPreferences(Loop *L, ScalarEvolution &SE, PeelingPreferences &PP) const

Get target-customized preferences for the generic loop peeling transformation.

Definition TargetTransformInfo.cpp:407

LLVM_ABI InstructionCost getCallInstrCost(Function *F, Type *RetTy, ArrayRef< Type * > Tys, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency) const

Definition TargetTransformInfo.cpp:1219

LLVM_ABI InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const

Definition TargetTransformInfo.cpp:1093

CastContextHint

Represents a hint about the context in which a cast is used.

@ Masked

The cast is used with a masked load/store.

@ None

The cast is not used with a load/store of any kind.

@ Normal

The cast is used with a normal load/store.

@ GatherScatter

The cast is used with a gather/scatter.
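
A hedged sketch of how these hints are typically produced and consumed. It assumes the static TargetTransformInfo::getCastContextHint(const Instruction *) helper and the getCastInstrCost costing hook declared elsewhere in TargetTransformInfo.h; neither is listed on this page, so the signatures used below are assumptions.

// Hedged sketch: derive the context hint from an existing cast and feed it to
// the cast cost query.
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/InstrTypes.h"

using namespace llvm;

static InstructionCost castCost(const CastInst &CI,
                                const TargetTransformInfo &TTI) {
  TargetTransformInfo::CastContextHint CCH =
      TargetTransformInfo::getCastContextHint(&CI);
  return TTI.getCastInstrCost(CI.getOpcode(), CI.getDestTy(), CI.getSrcTy(),
                              CCH, TargetTransformInfo::TCK_RecipThroughput,
                              &CI);
}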

LLVM_ABI InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index, TTI::TargetCostKind CostKind) const

Definition TargetTransformInfo.cpp:1084

LLVM_ABI ~TargetTransformInfo()

OperandValueKind

Additional information about an operand's possible values.

@ OK_UniformConstantValue

@ OK_NonUniformConstantValue

CacheLevel

The possible cache levels.

The instances of the Type class are immutable: once they are created, they are never changed.

LLVM_ABI bool isScalableTy(SmallPtrSetImpl< const Type * > &Visited) const

Return true if this is a type whose size is a known multiple of vscale.

static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)

Type * getScalarType() const

If this is a vector type, return the element type, otherwise return 'this'.

This is the common base class for vector predication intrinsics.

LLVM Value Representation.

Type * getType() const

All values are typed, get the type of this value.

Base class of all SIMD vector types.

ElementCount getElementCount() const

Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector.

constexpr ScalarTy getKnownMinValue() const

Returns the minimum value this quantity can represent.

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

unsigned ID

LLVM IR allows arbitrary numbers to be used as calling convention identifiers.

@ Fast

Attempts to make calls as fast as possible (e.g. by passing things in registers).

@ C

The default llvm calling convention, compatible with C.

initializer< Ty > init(const Ty &Val)

This is an optimization pass for GlobalISel generic memory operations.

decltype(auto) dyn_cast(const From &Val)

dyn_cast - Return the argument parameter cast to the specified type.

LLVM_ABI Value * getSplatValue(const Value *V)

Get splat value if the input is a splat vector or return nullptr.

bool containsIrreducibleCFG(RPOTraversalT &RPOTraversal, const LoopInfoT &LI)

Return true if the control flow in RPOTraversal is irreducible.

bool isa(const From &Val)

isa - Return true if the parameter to the template is an instance of one of the template type arguments.
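
A short sketch of the dyn_cast/isa idiom described by the two entries above; the helper functions are illustrative.

// Hedged sketch: dyn_cast yields null on a type mismatch, isa is a pure test.
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

static bool isVolatileStore(const Instruction *I) {
  if (const auto *SI = dyn_cast<StoreInst>(I))
    return SI->isVolatile();
  return false;
}

static bool endsBlock(const Instruction *I) {
  return isa<BranchInst>(I) || isa<ReturnInst>(I);
}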

LLVM_ABI ImmutablePass * createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA)

Create an analysis pass wrapper around a TTI object.

Definition TargetTransformInfo.cpp:1543

RecurKind

These are the kinds of recurrences that we support.

constexpr unsigned BitWidth

OutputIt move(R &&Range, OutputIt Out)

Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.

auto predecessors(const MachineBasicBlock *BB)

AnalysisManager< Function > FunctionAnalysisManager

Convenience typedef for the Function analysis manager.

InstructionUniformity

Enum describing how instructions behave with respect to uniformity and divergence,...

@ AlwaysUniform

The result values are always uniform.

Implement std::hash so that hash_code can be used in STL containers.

This struct is a compact representation of a valid (non-zero power of two) alignment.

A special type used by analysis passes to provide an address that identifies that particular analysis pass type.

Attributes of a target dependent hardware loop.

LLVM_ABI bool canAnalyze(LoopInfo &LI)

Definition TargetTransformInfo.cpp:65

HardwareLoopInfo()=delete

LLVM_ABI bool isHardwareLoopCandidate(ScalarEvolution &SE, LoopInfo &LI, DominatorTree &DT, bool ForceNestedLoop=false, bool ForceHardwareLoopPHI=false)

Definition TargetTransformInfo.cpp:127

Information about a load/store intrinsic defined by the target.

Returns options for expansion of memcmp; IsZeroCmp is true when the memcmp result is only compared against zero.

Describe known properties for a set of pointers.

Parameters that control the generic loop unrolling transformation.