LLVM: lib/CodeGen/CodeGenPrepare.cpp File Reference (original) (raw)

Go to the source code of this file.

Functions
STATISTIC (NumBlocksElim, "Number of blocks eliminated")
STATISTIC (NumPHIsElim, "Number of trivial PHIs eliminated")
STATISTIC (NumGEPsElim, "Number of GEPs converted to casts")
STATISTIC (NumCmpUses, "Number of uses of Cmp expressions replaced with uses of " "sunken Cmps")
STATISTIC (NumCastUses, "Number of uses of Cast expressions replaced with uses " "of sunken Casts")
STATISTIC (NumMemoryInsts, "Number of memory instructions whose address " "computations were sunk")
STATISTIC (NumMemoryInstsPhiCreated, "Number of phis created when address " "computations were sunk to memory instructions")
STATISTIC (NumMemoryInstsSelectCreated, "Number of select created when address " "computations were sunk to memory instructions")
STATISTIC (NumExtsMoved, "Number of [s|z]ext instructions combined with loads")
STATISTIC (NumExtUses, "Number of uses of [s|z]ext instructions optimized")
STATISTIC (NumAndsAdded, "Number of and mask instructions added to form ext loads")
STATISTIC (NumAndUses, "Number of uses of and mask instructions optimized")
STATISTIC (NumRetsDup, "Number of return instructions duplicated")
STATISTIC (NumDbgValueMoved, "Number of debug value instructions moved")
STATISTIC (NumSelectsExpanded, "Number of selects turned into branches")
STATISTIC (NumStoreExtractExposed, "Number of store(extractelement) exposed")
INITIALIZE_PASS_BEGIN (CodeGenPrepareLegacyPass, DEBUG_TYPE, "Optimize for code generation", false, false)
INITIALIZE_PASS_END (CodeGenPrepareLegacyPass, DEBUG_TYPE, "Optimize for code generation", false, false)
static void replaceAllUsesWith (Value *Old, Value *New, SmallSet< BasicBlock *, 32 > &FreshBBs, bool IsHuge)
Replace all old uses with new ones, and push the updated BBs into FreshBBs.
static void computeBaseDerivedRelocateMap (const SmallVectorImpl< GCRelocateInst * > &AllRelocateCalls, MapVector< GCRelocateInst *, SmallVector< GCRelocateInst *, 0 > > &RelocateInstMap)
static bool getGEPSmallConstantIntOffsetV (GetElementPtrInst *GEP, SmallVectorImpl< Value * > &OffsetV)
static bool simplifyRelocatesOffABase (GCRelocateInst *RelocatedBase, const SmallVectorImpl< GCRelocateInst * > &Targets)
static bool SinkCast (CastInst *CI)
Sink the specified cast instruction into its user blocks.
static bool OptimizeNoopCopyExpression (CastInst *CI, const TargetLowering &TLI, const DataLayout &DL)
If the specified cast instruction is a noop copy (e.g.
static bool matchIncrement (const Instruction *IVInc, Instruction *&LHS, Constant *&Step)
static std::optional< std::pair< Instruction *, Constant * > > getIVIncrement (const PHINode *PN, const LoopInfo *LI)
If given PN is an inductive variable with value IVInc coming from the backedge, and on each iteration it gets increased by Step, return pair <IVInc, Step>.
static bool isIVIncrement (const Value *V, const LoopInfo *LI)
static bool matchUAddWithOverflowConstantEdgeCases (CmpInst *Cmp, BinaryOperator *&Add)
Match special-case patterns that check for unsigned add overflow.
static bool sinkCmpExpression (CmpInst *Cmp, const TargetLowering &TLI)
Sink the given CmpInst into user blocks to reduce the number of virtual registers that must be created and coalesced.
static bool foldICmpWithDominatingICmp (CmpInst *Cmp, const TargetLowering &TLI)
For pattern like:
static bool swapICmpOperandsToExposeCSEOpportunities (CmpInst *Cmp)
Many architectures use the same instruction for both subtract and cmp.
static bool foldFCmpToFPClassTest (CmpInst *Cmp, const TargetLowering &TLI, const DataLayout &DL)
static bool isRemOfLoopIncrementWithLoopInvariant (Instruction *Rem, const LoopInfo *LI, Value *&RemAmtOut, Value *&AddInstOut, Value *&AddOffsetOut, PHINode *&LoopIncrPNOut)
static bool foldURemOfLoopIncrement (Instruction *Rem, const DataLayout *DL, const LoopInfo *LI, SmallSet< BasicBlock *, 32 > &FreshBBs, bool IsHuge)
static bool adjustIsPower2Test (CmpInst *Cmp, const TargetLowering &TLI, const TargetTransformInfo &TTI, const DataLayout &DL)
Some targets have better codegen for ctpop(X) u< 2 than ctpop(X) == 1.
static bool sinkAndCmp0Expression (Instruction *AndI, const TargetLowering &TLI, SetOfInstrs &InsertedInsts)
Duplicate and sink the given 'and' instruction into user blocks where it is used in a compare to allow isel to generate better code for targets where this operation can be combined.
static bool isExtractBitsCandidateUse (Instruction *User)
Check if the candidates could be combined with a shift instruction, which includes:
static bool SinkShiftAndTruncate (BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI, DenseMap< BasicBlock *, BinaryOperator * > &InsertedShifts, const TargetLowering &TLI, const DataLayout &DL)
Sink both shift and truncate instruction to the use of truncate's BB.
static bool OptimizeExtractBits (BinaryOperator *ShiftI, ConstantInt *CI, const TargetLowering &TLI, const DataLayout &DL)
Sink the shift right instruction into user blocks if the uses could potentially be combined with this shift instruction and generate BitExtract instruction.
static bool despeculateCountZeros (IntrinsicInst *CountZeros, LoopInfo &LI, const TargetLowering *TLI, const DataLayout *DL, ModifyDT &ModifiedDT, SmallSet< BasicBlock *, 32 > &FreshBBs, bool IsHugeFunc)
If counting leading or trailing zeros is an expensive operation and a zero input is defined, add a check for zero to avoid calling the intrinsic.
static bool isIntrinsicOrLFToBeTailCalled (const TargetLibraryInfo *TLInfo, const CallInst *CI)
static bool MightBeFoldableInst (Instruction *I)
This is a little filter, which returns true if an addressing computation involving I might be folded into a load/store accessing it.
static bool isPromotedInstructionLegal (const TargetLowering &TLI, const DataLayout &DL, Value *Val)
Check whether or not Val is a legal instruction for TLI.
static bool IsOperandAMemoryOperand (CallInst *CI, InlineAsm *IA, Value *OpVal, const TargetLowering &TLI, const TargetRegisterInfo &TRI)
Check to see if all uses of OpVal by the specified inline asm call are due to memory operands.
static bool FindAllMemoryUses (Instruction *I, SmallVectorImpl< std::pair< Use *, Type * > > &MemoryUses, SmallPtrSetImpl< Instruction * > &ConsideredInsts, const TargetLowering &TLI, const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI, unsigned &SeenInsts)
Recursively walk all the uses of I until we find a memory use.
static bool FindAllMemoryUses (Instruction *I, SmallVectorImpl< std::pair< Use *, Type * > > &MemoryUses, const TargetLowering &TLI, const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
static bool IsNonLocalValue (Value *V, BasicBlock *BB)
Return true if the specified values are defined in a different basic block than BB.
static bool hasSameExtUse (Value *Val, const TargetLowering &TLI)
Check if all the uses of Val are equivalent (or free) zero or sign extensions.
static bool sinkSelectOperand (const TargetTransformInfo *TTI, Value *V)
Check if V (an operand of a select instruction) is an expensive instruction that is only used once.
static bool isFormingBranchFromSelectProfitable (const TargetTransformInfo *TTI, const TargetLowering *TLI, SelectInst *SI)
Returns true if a SelectInst should be turned into an explicit branch.
static Value * getTrueOrFalseValue (SelectInst *SI, bool isTrue, const SmallPtrSet< const Instruction *, 2 > &Selects)
If isTrue is true, return the true value of SI, otherwise return false value of SI.
static bool splitMergedValStore (StoreInst &SI, const DataLayout &DL, const TargetLowering &TLI)
For the instruction sequence of store below, F and I values are bundled together as an i64 value before being stored into memory.
static bool GEPSequentialConstIndexed (GetElementPtrInst *GEP)
static bool tryUnmergingGEPsAcrossIndirectBr (GetElementPtrInst *GEPI, const TargetTransformInfo *TTI)
static bool optimizeBranch (BranchInst *Branch, const TargetLowering &TLI, SmallSet< BasicBlock *, 32 > &FreshBBs, bool IsHugeFunc)
static void DbgInserterHelper (DbgValueInst *DVI, Instruction *VI)
static void DbgInserterHelper (DbgVariableRecord *DVR, Instruction *VI)
static void scaleWeights (uint64_t &NewTrue, uint64_t &NewFalse)
Scale down both weights to fit into uint32_t.
Variables
static cl::opt< bool > DisableBranchOpts ("disable-cgp-branch-opts", cl::Hidden, cl::init(false), cl::desc("Disable branch optimizations in CodeGenPrepare"))
static cl::opt< bool > DisableGCOpts ("disable-cgp-gc-opts", cl::Hidden, cl::init(false), cl::desc("Disable GC optimizations in CodeGenPrepare"))
static cl::opt< bool > DisableSelectToBranch ("disable-cgp-select2branch", cl::Hidden, cl::init(false), cl::desc("Disable select to branch conversion."))
static cl::opt< bool > AddrSinkUsingGEPs ("addr-sink-using-gep", cl::Hidden, cl::init(true), cl::desc("Address sinking in CGP using GEPs."))
static cl::opt< bool > EnableAndCmpSinking ("enable-andcmp-sinking", cl::Hidden, cl::init(true), cl::desc("Enable sinking and/cmp into branches."))
static cl::opt< bool > DisableStoreExtract ("disable-cgp-store-extract", cl::Hidden, cl::init(false), cl::desc("Disable store(extract) optimizations in CodeGenPrepare"))
static cl::opt< bool > StressStoreExtract ("stress-cgp-store-extract", cl::Hidden, cl::init(false), cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"))
static cl::opt< bool > DisableExtLdPromotion ("disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in " "CodeGenPrepare"))
static cl::opt< bool > StressExtLdPromotion ("stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) " "optimization in CodeGenPrepare"))
static cl::opt< bool > DisablePreheaderProtect ("disable-preheader-prot", cl::Hidden, cl::init(false), cl::desc("Disable protection against removing loop preheaders"))
static cl::opt< bool > ProfileGuidedSectionPrefix ("profile-guided-section-prefix", cl::Hidden, cl::init(true), cl::desc("Use profile info to add section prefix for hot/cold functions"))
static cl::opt< bool > ProfileUnknownInSpecialSection ("profile-unknown-in-special-section", cl::Hidden, cl::desc("In profiling mode like sampleFDO, if a function doesn't have " "profile, we cannot tell the function is cold for sure because " "it may be a function newly added without ever being sampled. " "With the flag enabled, compiler can put such profile unknown " "functions into a special section, so runtime system can choose " "to handle it in a different way than .text section, to save " "RAM for example. "))
static cl::opt< bool > BBSectionsGuidedSectionPrefix ("bbsections-guided-section-prefix", cl::Hidden, cl::init(true), cl::desc("Use the basic-block-sections profile to determine the text " "section prefix for hot functions. Functions with " "basic-block-sections profile will be placed in `.text.hot` " "regardless of their FDO profile info. Other functions won't be " "impacted, i.e., their prefixes will be decided by FDO/sampleFDO " "profiles."))
static cl::opt< uint64_t > FreqRatioToSkipMerge ("cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2), cl::desc("Skip merging empty blocks if (frequency of empty block) / " "(frequency of destination block) is greater than this ratio"))
static cl::opt< bool > ForceSplitStore ("force-split-store", cl::Hidden, cl::init(false), cl::desc("Force store splitting no matter what the target query says."))
static cl::opt< bool > EnableTypePromotionMerge ("cgp-type-promotion-merge", cl::Hidden, cl::desc("Enable merging of redundant sexts when one is dominating" " the other."), cl::init(true))
static cl::opt< bool > DisableComplexAddrModes ("disable-complex-addr-modes", cl::Hidden, cl::init(false), cl::desc("Disables combining addressing modes with different parts " "in optimizeMemoryInst."))
static cl::opt< bool > AddrSinkNewPhis ("addr-sink-new-phis", cl::Hidden, cl::init(false), cl::desc("Allow creation of Phis in Address sinking."))
static cl::opt< bool > AddrSinkNewSelects ("addr-sink-new-select", cl::Hidden, cl::init(true), cl::desc("Allow creation of selects in Address sinking."))
static cl::opt< bool > AddrSinkCombineBaseReg ("addr-sink-combine-base-reg", cl::Hidden, cl::init(true), cl::desc("Allow combining of BaseReg field in Address sinking."))
static cl::opt< bool > AddrSinkCombineBaseGV ("addr-sink-combine-base-gv", cl::Hidden, cl::init(true), cl::desc("Allow combining of BaseGV field in Address sinking."))
static cl::opt< bool > AddrSinkCombineBaseOffs ("addr-sink-combine-base-offs", cl::Hidden, cl::init(true), cl::desc("Allow combining of BaseOffs field in Address sinking."))
static cl::opt< bool > AddrSinkCombineScaledReg ("addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true), cl::desc("Allow combining of ScaledReg field in Address sinking."))
static cl::opt< bool > EnableGEPOffsetSplit ("cgp-split-large-offset-gep", cl::Hidden, cl::init(true), cl::desc("Enable splitting large offset of GEP."))
static cl::opt< bool > EnableICMP_EQToICMP_ST ("cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false), cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion."))
static cl::opt< bool > VerifyBFIUpdates ("cgp-verify-bfi-updates", cl::Hidden, cl::init(false), cl::desc("Enable BFI update verification for " "CodeGenPrepare."))
static cl::opt< bool > OptimizePhiTypes ("cgp-optimize-phi-types", cl::Hidden, cl::init(true), cl::desc("Enable converting phi types in CodeGenPrepare"))
static cl::opt< unsigned > HugeFuncThresholdInCGPP ("cgpp-huge-func", cl::init(10000), cl::Hidden, cl::desc("Least BB number of huge function."))
static cl::opt< unsigned > MaxAddressUsersToScan ("cgp-max-address-users-to-scan", cl::init(100), cl::Hidden, cl::desc("Max number of address users to look at"))
static cl::opt< bool > DisableDeletePHIs ("disable-cgp-delete-phis", cl::Hidden, cl::init(false), cl::desc("Disable elimination of dead PHI nodes."))
DEBUG_TYPE
Optimize for code generation
Optimize for code false

DEBUG_TYPE

#define DEBUG_TYPE "codegenprepare"

adjustIsPower2Test()

computeBaseDerivedRelocateMap()

Definition at line 1201 of file CodeGenPrepare.cpp.

References llvm::MapVector< KeyT, ValueT, MapType, VectorType >::end(), llvm::MapVector< KeyT, ValueT, MapType, VectorType >::find(), I, and llvm::MapVector< KeyT, ValueT, MapType, VectorType >::insert().

DbgInserterHelper() [1/2]

DbgInserterHelper() [2/2]

despeculateCountZeros()

If counting leading or trailing zeros is an expensive operation and a zero input is defined, add a check for zero to avoid calling the intrinsic.

We want to transform:
    %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)

into:
  entry:
    %cmpz = icmp eq i64 %A, 0
    br i1 %cmpz, label %cond.end, label %cond.false
  cond.false:
    %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
    br label %cond.end
  cond.end:
    %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]

If the transform is performed, return true and set ModifiedDT to true.

Definition at line 2494 of file CodeGenPrepare.cpp.

References llvm::PHINode::addIncoming(), llvm::BasicBlock::begin(), llvm::BitWidth, llvm::IRBuilderBase::CreateCondBr(), llvm::IRBuilderBase::CreateFreeze(), llvm::IRBuilderBase::CreateICmpEQ(), llvm::IRBuilderBase::CreatePHI(), DL, llvm::Instruction::eraseFromParent(), llvm::Value::getContext(), llvm::Instruction::getDebugLoc(), llvm::IRBuilderBase::getInt(), llvm::IntrinsicInst::getIntrinsicID(), llvm::LoopInfoBase< BlockT, LoopT >::getLoopFor(), llvm::Constant::getNullValue(), llvm::User::getOperand(), llvm::User::getOperandUse(), llvm::ilist_detail::node_parent_access< NodeTy, ParentTy >::getParent(), llvm::Type::getScalarSizeInBits(), llvm::BasicBlock::getTerminator(), llvm::IRBuilderBase::getTrue(), llvm::Value::getType(), llvm::SmallSet< T, N, C >::insert(), llvm::TargetLoweringBase::isCheapToSpeculateCtlz(), llvm::TargetLoweringBase::isCheapToSpeculateCttz(), llvm::isGuaranteedNotToBeUndefOrPoison(), llvm::isKnownNonZero(), llvm::Type::isVectorTy(), llvm::PatternMatch::m_One(), llvm::PatternMatch::match(), replaceAllUsesWith(), llvm::CallBase::setArgOperand(), llvm::IRBuilderBase::SetCurrentDebugLocation(), llvm::IRBuilderBase::SetInsertPoint(), and llvm::BasicBlock::splitBasicBlock().

FindAllMemoryUses() [1/2]

FindAllMemoryUses() [2/2]

Recursively walk all the uses of I until we find a memory use.

If we find an obviously non-foldable instruction, return true. Add accessed addresses and types to MemoryUses.

Definition at line 5495 of file CodeGenPrepare.cpp.

References FindAllMemoryUses(), llvm::StoreInst::getPointerOperandIndex(), llvm::AtomicCmpXchgInst::getPointerOperandIndex(), llvm::AtomicRMWInst::getPointerOperandIndex(), getType(), I, llvm::SmallPtrSetImpl< PtrType >::insert(), IsOperandAMemoryOperand(), MaxAddressUsersToScan, MightBeFoldableInst(), llvm::shouldOptimizeForSize(), and TRI.

Referenced by FindAllMemoryUses().

foldFCmpToFPClassTest()

Definition at line 1950 of file CodeGenPrepare.cpp.

References llvm::IRBuilderBase::createIsFPClass(), DL, llvm::fcInf, llvm::fcmpToClassTest(), llvm::fcNan, llvm::getFCmpCondCode(), llvm::User::getOperand(), llvm::ilist_detail::node_parent_access< NodeTy, ParentTy >::getParent(), llvm::CmpInst::getPredicate(), llvm::EVT::getSimpleVT(), llvm::TargetLoweringBase::getValueType(), llvm::TargetLoweringBase::isCondCodeLegal(), llvm::TargetLoweringBase::isFAbsFree(), and llvm::RecursivelyDeleteTriviallyDeadInstructions().

foldICmpWithDominatingICmp()

For pattern like:

DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB)
...
DomBB:
  ...
  br DomCond, TrueBB, CmpBB
CmpBB: (with DomBB being the single predecessor)
  ...
  Cmp = icmp eq CmpOp0, CmpOp1
  ...

It would use two comparisons on targets where the lowering of icmp sgt/slt is different from the lowering of icmp eq (PowerPC). This function tries to convert 'Cmp = icmp eq CmpOp0, CmpOp1' to 'Cmp = icmp slt/sgt CmpOp0, CmpOp1'. After that, DomCond and Cmp can use the same comparison, so one comparison is eliminated.

Return true if any changes are made.

Definition at line 1851 of file CodeGenPrepare.cpp.

References assert(), EnableICMP_EQToICMP_ST, llvm::BasicBlock::getSinglePredecessor(), llvm::CmpInst::getSwappedPredicate(), llvm::BasicBlock::getTerminator(), llvm::CmpInst::ICMP_EQ, llvm::CmpInst::ICMP_SGT, llvm::CmpInst::ICMP_SLT, llvm::BranchInst::isConditional(), llvm::TargetLoweringBase::isEqualityCmpFoldedWithSignedCmp(), llvm_unreachable, llvm::PatternMatch::m_Br(), llvm::PatternMatch::m_ICmp(), llvm::PatternMatch::m_Specific(), llvm::PatternMatch::m_Value(), llvm::PatternMatch::match(), and llvm::BranchInst::swapSuccessors().

foldURemOfLoopIncrement()

Definition at line 2068 of file CodeGenPrepare.cpp.

References llvm::PHINode::addIncoming(), assert(), llvm::IRBuilderBase::CreateICmp(), llvm::IRBuilderBase::CreateNUWAdd(), llvm::IRBuilderBase::CreatePHI(), llvm::IRBuilderBase::CreateSelect(), DL, llvm::Instruction::eraseFromParent(), llvm::Value::getContext(), llvm::PHINode::getIncomingValueForBlock(), llvm::LoopInfoBase< BlockT, LoopT >::getLoopFor(), llvm::Constant::getNullValue(), llvm::ilist_detail::node_parent_access< NodeTy, ParentTy >::getParent(), getParent(), llvm::Value::getType(), llvm::CmpInst::ICMP_EQ, llvm::SmallSet< T, N, C >::insert(), isRemOfLoopIncrementWithLoopInvariant(), llvm::PatternMatch::m_ImmConstant(), llvm::PatternMatch::m_NSWAdd(), llvm::PatternMatch::m_Value(), llvm::PatternMatch::match(), replaceAllUsesWith(), llvm::IRBuilderBase::SetInsertPoint(), llvm::simplifyAddInst(), llvm::simplifyURemInst(), and llvm::Value::use_empty().

GEPSequentialConstIndexed()

getGEPSmallConstantIntOffsetV()

getIVIncrement()

If given PN is an inductive variable with value IVInc coming from the backedge, and on each iteration it gets increased by Step, return pair <IVInc, Step>.

Otherwise, return std::nullopt.

Definition at line 1527 of file CodeGenPrepare.cpp.

References llvm::PHINode::getIncomingValueForBlock(), llvm::LoopInfoBase< BlockT, LoopT >::getLoopFor(), llvm::ilist_detail::node_parent_access< NodeTy, ParentTy >::getParent(), LHS, and matchIncrement().

Referenced by isIVIncrement(), and isRemOfLoopIncrementWithLoopInvariant().

getTrueOrFalseValue()

If isTrue is true, return the true value of SI, otherwise return false value of SI.

If the true/false value of SI is defined by any select instructions in Selects, look through the defining select instruction until the true/false value is not defined in Selects.

Definition at line 7319 of file CodeGenPrepare.cpp.

References assert(), and llvm::SmallPtrSetImpl< PtrType >::count().

hasSameExtUse()

INITIALIZE_PASS_BEGIN()

INITIALIZE_PASS_BEGIN ( CodeGenPrepareLegacyPass ,
DEBUG_TYPE ,
"Optimize for code generation" ,
false ,
false
)

isExtractBitsCandidateUse()

isFormingBranchFromSelectProfitable()

isIntrinsicOrLFToBeTailCalled()

isIVIncrement()

IsNonLocalValue()

Return true if the specified values are defined in a different basic block than BB.

Definition at line 5714 of file CodeGenPrepare.cpp.

References I.

IsOperandAMemoryOperand()

isPromotedInstructionLegal()

isRemOfLoopIncrementWithLoopInvariant()

matchIncrement()

matchUAddWithOverflowConstantEdgeCases()

Match special-case patterns that check for unsigned add overflow.

Definition at line 1638 of file CodeGenPrepare.cpp.

References A, llvm::Add, B, llvm::Constant::getAllOnesValue(), llvm::CmpInst::ICMP_EQ, llvm::CmpInst::ICMP_NE, llvm::PatternMatch::m_Add(), llvm::PatternMatch::m_AllOnes(), llvm::PatternMatch::m_Specific(), llvm::PatternMatch::m_ZeroInt(), and llvm::PatternMatch::match().

MightBeFoldableInst()

This is a little filter, which returns true if an addressing computation involving I might be folded into a load/store accessing it.

This doesn't need to be perfect, but needs to accept at least the set of instructions that MatchOperationAddr can.

Definition at line 4613 of file CodeGenPrepare.cpp.

References I.

Referenced by FindAllMemoryUses().

optimizeBranch()

Definition at line 8521 of file CodeGenPrepare.cpp.

References llvm::IRBuilderBase::CreateCmp(), llvm::dbgs(), llvm::Instruction::dropPoisonGeneratingFlags(), llvm::ilist_detail::node_parent_access< NodeTy, ParentTy >::getParent(), llvm::Value::getType(), llvm::APInt::isPowerOf2(), LLVM_DEBUG, llvm::APInt::logBase2(), llvm::PatternMatch::m_Add(), llvm::PatternMatch::m_Shr(), llvm::PatternMatch::m_Specific(), llvm::PatternMatch::m_SpecificInt(), llvm::PatternMatch::m_Sub(), llvm::PatternMatch::match(), llvm::Instruction::moveBefore(), llvm::TargetLoweringBase::preferZeroCompareBranch(), replaceAllUsesWith(), and X.

OptimizeExtractBits()

Sink the shift right instruction into user blocks if the uses could potentially be combined with this shift instruction and generate BitExtract instruction.

It will only be applied if the architecture supports BitExtract instruction. Here is an example:

BB1:
  %x.extract.shift = lshr i64 %arg1, 32
BB2:
  %x.extract.trunc = trunc i64 %x.extract.shift to i16
==>

BB2:
  %x.extract.shift.1 = lshr i64 %arg1, 32
  %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16

CodeGen will recognize the pattern in BB2 and generate BitExtract instruction. Return true if any changes are made.

Only insert instructions in each block once.

Definition at line 2392 of file CodeGenPrepare.cpp.

References assert(), DL, llvm::BasicBlock::end(), llvm::Instruction::eraseFromParent(), llvm::Instruction::getDebugLoc(), llvm::BasicBlock::getFirstInsertionPt(), llvm::BinaryOperator::getOpcode(), llvm::User::getOperand(), llvm::ilist_detail::node_parent_access< NodeTy, ParentTy >::getParent(), llvm::Value::getType(), llvm::TargetLoweringBase::getValueType(), llvm::Instruction::insertBefore(), isExtractBitsCandidateUse(), llvm::TargetLoweringBase::isTypeLegal(), llvm::salvageDebugInfo(), llvm::Instruction::setDebugLoc(), SinkShiftAndTruncate(), llvm::Value::use_empty(), llvm::Value::user_begin(), and llvm::Value::user_end().

OptimizeNoopCopyExpression()

If the specified cast instruction is a noop copy (e.g.

it's casting from one pointer type to another, i32->i8 on PPC), sink it into user blocks to reduce the number of virtual registers that must be created and coalesced.

Return true if any changes are made.

Definition at line 1465 of file CodeGenPrepare.cpp.

References llvm::EVT::bitsLT(), DL, llvm::Value::getContext(), llvm::User::getOperand(), llvm::Value::getType(), llvm::TargetLoweringBase::getTypeAction(), llvm::TargetLoweringBase::getTypeToTransformTo(), llvm::TargetLoweringBase::getValueType(), llvm::TargetLoweringBase::isFreeAddrSpaceCast(), llvm::EVT::isInteger(), SinkCast(), and llvm::TargetLoweringBase::TypePromoteInteger.

replaceAllUsesWith()

scaleWeights()

simplifyRelocatesOffABase()

Definition at line 1253 of file CodeGenPrepare.cpp.

References assert(), llvm::sampleprof::Base, llvm::IRBuilderBase::CreateBitCast(), llvm::IRBuilderBase::CreateGEP(), llvm::GCRelocateInst::getBasePtrIndex(), getGEPSmallConstantIntOffsetV(), llvm::ilist_node_with_parent< NodeTy, ParentTy, Options >::getNextNode(), llvm::ilist_detail::node_parent_access< NodeTy, ParentTy >::getParent(), llvm::GCProjectionInst::getStatepoint(), llvm::Value::getType(), llvm::Instruction::moveBefore(), llvm::Value::replaceAllUsesWith(), llvm::IRBuilderBase::SetCurrentDebugLocation(), and llvm::Value::takeName().

sinkAndCmp0Expression()

Duplicate and sink the given 'and' instruction into user blocks where it is used in a compare to allow isel to generate better code for targets where this operation can be combined.

Return true if any changes are made.

Definition at line 2207 of file CodeGenPrepare.cpp.

References assert(), llvm::BinaryOperator::Create(), llvm::dbgs(), llvm::Value::dump(), llvm::Instruction::eraseFromParent(), llvm::Instruction::getDebugLoc(), llvm::ilist_node_impl< OptionsT >::getIterator(), llvm::User::getOperand(), llvm::ilist_detail::node_parent_access< NodeTy, ParentTy >::getParent(), llvm::Value::hasOneUse(), llvm::TargetLoweringBase::isMaskAndCmp0FoldingBeneficial(), llvm::APInt::isZero(), LLVM_DEBUG, llvm::Instruction::setDebugLoc(), llvm::Value::user_begin(), llvm::Value::user_end(), and llvm::Value::users().

SinkCast()

Sink the specified cast instruction into its user blocks.

InsertedCasts - Only insert a cast in each block once.

Definition at line 1397 of file CodeGenPrepare.cpp.

References assert(), llvm::Instruction::clone(), llvm::BasicBlock::end(), llvm::Instruction::eraseFromParent(), llvm::BasicBlock::getFirstInsertionPt(), llvm::ilist_detail::node_parent_access< NodeTy, ParentTy >::getParent(), llvm::BasicBlock::getTerminator(), llvm::Instruction::insertBefore(), llvm::Instruction::isEHPad(), llvm::salvageDebugInfo(), llvm::Value::use_empty(), llvm::Value::user_begin(), and llvm::Value::user_end().

Referenced by OptimizeNoopCopyExpression().

sinkCmpExpression()

sinkSelectOperand()

SinkShiftAndTruncate()

Sink both shift and truncate instruction to the use of truncate's BB.

Definition at line 2299 of file CodeGenPrepare.cpp.

References assert(), llvm::CastInst::Create(), DL, llvm::BasicBlock::end(), llvm::Instruction::getDebugLoc(), llvm::BasicBlock::getFirstInsertionPt(), llvm::BinaryOperator::getOpcode(), llvm::Instruction::getOpcode(), llvm::User::getOperand(), llvm::ilist_detail::node_parent_access< NodeTy, ParentTy >::getParent(), llvm::Value::getType(), llvm::TargetLoweringBase::getValueType(), llvm::Instruction::insertBefore(), llvm::TargetLoweringBase::InstructionOpcodeToISD(), llvm::TargetLoweringBase::isOperationLegalOrCustom(), and llvm::Instruction::setDebugLoc().

Referenced by OptimizeExtractBits().

splitMergedValStore()

For the instruction sequence of store below, F and I values are bundled together as an i64 value before being stored into memory.

Sometimes it is more efficient to generate separate stores for F and I, which can remove the bitwise instructions or sink them to colder places.

(store (or (zext (bitcast F to i32) to i64), (shl (zext I to i64), 32)), addr) --> (store F, addr) and (store I, addr+4)

Similarly, splitting for other merged store can also be beneficial, like: For pair of {i32, i32}, i64 store --> two i32 stores. For pair of {i32, i16}, i64 store --> two i32 stores. For pair of {i16, i16}, i32 store --> two i16 stores. For pair of {i16, i8}, i32 store --> two i16 stores. For pair of {i8, i8}, i16 store --> two i8 stores.

We allow each target to determine specifically which kind of splitting is supported.

The store patterns are commonly seen from the simple code snippet below if only std::make_pair(...) is sroa transformed before inlined into hoo. void goo(const std::pair<int, float> &); hoo() { ... goo(std::make_pair(tmp, ftmp)); ... }

Although we already have similar splitting in DAG Combine, we duplicate it in CodeGenPrepare to catch the case in which pattern is across multiple BBs. The logic in DAG Combine is kept to catch case generated during code expansion.

Definition at line 8255 of file CodeGenPrepare.cpp.

References Addr, llvm::commonAlignment(), llvm::IRBuilderBase::CreateAlignedStore(), llvm::IRBuilderBase::CreateBitCast(), llvm::IRBuilderBase::CreateGEP(), llvm::IRBuilderBase::CreateZExtOrBitCast(), DL, ForceSplitStore, llvm::EVT::getEVT(), llvm::Type::getInt32Ty(), llvm::Type::getIntNTy(), llvm::Value::getType(), llvm::Type::isIntegerTy(), llvm::TargetLoweringBase::isMultiStoresCheaperThanBitsMerge(), llvm::Type::isScalableTy(), LValue, llvm::PatternMatch::m_c_Or(), llvm::PatternMatch::m_OneUse(), llvm::PatternMatch::m_Shl(), llvm::PatternMatch::m_SpecificInt(), llvm::PatternMatch::m_Value(), llvm::PatternMatch::m_ZExt(), llvm::PatternMatch::match(), llvm::IRBuilderBase::SetInsertPoint(), and llvm::Upper.

STATISTIC() [1/16]

STATISTIC ( NumAndsAdded ,
"Number of and mask instructions added to form ext loads"
)

STATISTIC() [2/16]

STATISTIC() [3/16]

STATISTIC ( NumBlocksElim ,
"Number of blocks eliminated"
)

STATISTIC() [4/16]

STATISTIC ( NumCastUses ,
"Number of uses of Cast expressions replaced with uses " "of sunken Casts"
)

STATISTIC() [5/16]

STATISTIC ( NumCmpUses ,
"Number of uses of Cmp expressions replaced with uses of " "sunken Cmps"
)

STATISTIC() [6/16]

STATISTIC() [7/16]

STATISTIC ( NumExtsMoved ,
"Number of [s|z]ext instructions combined with loads"
)

STATISTIC() [8/16]

STATISTIC() [9/16]

STATISTIC ( NumGEPsElim ,
"Number of GEPs converted to casts"
)

STATISTIC() [10/16]

STATISTIC ( NumMemoryInsts ,
"Number of memory instructions whose address " "computations were sunk"
)

STATISTIC() [11/16]

STATISTIC ( NumMemoryInstsPhiCreated ,
"Number of phis created when address " "computations were sunk to memory instructions"
)

STATISTIC() [12/16]

STATISTIC ( NumMemoryInstsSelectCreated ,
"Number of select created when address " "computations were sunk to memory instructions"
)

STATISTIC() [13/16]

STATISTIC ( NumPHIsElim ,
"Number of trivial PHIs eliminated"
)

STATISTIC() [14/16]

STATISTIC ( NumRetsDup ,
"Number of return instructions duplicated"
)

STATISTIC() [15/16]

STATISTIC ( NumSelectsExpanded ,
"Number of selects turned into branches"
)

STATISTIC() [16/16]

STATISTIC ( NumStoreExtractExposed ,
"Number of store(extractelement) exposed"
)

swapICmpOperandsToExposeCSEOpportunities()

static bool swapICmpOperandsToExposeCSEOpportunities ( CmpInst * Cmp) static

tryUnmergingGEPsAcrossIndirectBr()

Definition at line 8416 of file CodeGenPrepare.cpp.

References GEPSequentialConstIndexed(), llvm::TargetTransformInfo::getIntImmCost(), llvm::User::getOperand(), llvm::ilist_detail::node_parent_access< NodeTy, ParentTy >::getParent(), llvm::BasicBlock::getTerminator(), llvm::Value::getType(), llvm::ConstantInt::getValue(), llvm::none_of(), llvm::TargetTransformInfo::TCC_Basic, llvm::TargetTransformInfo::TCK_SizeAndLatency, and llvm::Value::users().

AddrSinkCombineBaseGV

cl::opt< bool > AddrSinkCombineBaseGV("addr-sink-combine-base-gv", cl::Hidden, cl::init(true), cl::desc("Allow combining of BaseGV field in Address sinking.")) ( "addr-sink-combine-base-gv" , cl::Hidden , cl::init(true) , cl::desc("Allow combining of BaseGV field in Address sinking.") ) static

AddrSinkCombineBaseOffs

cl::opt< bool > AddrSinkCombineBaseOffs("addr-sink-combine-base-offs", cl::Hidden, cl::init(true), cl::desc("Allow combining of BaseOffs field in Address sinking.")) ( "addr-sink-combine-base-offs" , cl::Hidden , cl::init(true) , cl::desc("Allow combining of BaseOffs field in Address sinking.") ) static

AddrSinkCombineBaseReg

cl::opt< bool > AddrSinkCombineBaseReg("addr-sink-combine-base-reg", cl::Hidden, cl::init(true), cl::desc("Allow combining of BaseReg field in Address sinking.")) ( "addr-sink-combine-base-reg" , cl::Hidden , cl::init(true) , cl::desc("Allow combining of BaseReg field in Address sinking.") ) static

AddrSinkCombineScaledReg

cl::opt< bool > AddrSinkCombineScaledReg("addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true), cl::desc("Allow combining of ScaledReg field in Address sinking.")) ( "addr-sink-combine-scaled-reg" , cl::Hidden , cl::init(true) , cl::desc("Allow combining of ScaledReg field in Address sinking.") ) static

AddrSinkNewPhis

cl::opt< bool > AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false), cl::desc("Allow creation of Phis in Address sinking.")) ( "addr-sink-new-phis" , cl::Hidden , cl::init(false) , cl::desc("Allow creation of Phis in Address sinking.") ) static

AddrSinkNewSelects

AddrSinkUsingGEPs

BBSectionsGuidedSectionPrefix

cl::opt< bool > BBSectionsGuidedSectionPrefix("bbsections-guided-section-prefix", cl::Hidden, cl::init(true), cl::desc("Use the basic-block-sections profile to determine the text " "section prefix for hot functions. Functions with " "basic-block-sections profile will be placed in `.text.hot` " "regardless of their FDO profile info. Other functions won't be " "impacted, i.e., their prefixes will be decided by FDO/sampleFDO " "profiles.")) ( "bbsections-guided-section-prefix" , cl::Hidden , cl::init(true) , cl::desc("Use the basic-block-sections profile to determine the text " "section prefix for hot functions. Functions with " "basic-block-sections profile will be placed in `.text.hot` " "regardless of their FDO profile info. Other functions won't be " "impacted, i.e., their prefixes will be decided by FDO/sampleFDO " "profiles.") ) static

DEBUG_TYPE

DisableBranchOpts

cl::opt< bool > DisableBranchOpts("disable-cgp-branch-opts", cl::Hidden, cl::init(false), cl::desc("Disable branch optimizations in CodeGenPrepare")) ( "disable-cgp-branch-opts" , cl::Hidden , cl::init(false) , cl::desc("Disable branch optimizations in CodeGenPrepare") ) static

DisableComplexAddrModes

cl::opt< bool > DisableComplexAddrModes("disable-complex-addr-modes", cl::Hidden, cl::init(false), cl::desc("Disables combining addressing modes with different parts " "in optimizeMemoryInst.")) ( "disable-complex-addr-modes" , cl::Hidden , cl::init(false) , cl::desc("Disables combining addressing modes with different parts " "in optimizeMemoryInst.") ) static

DisableDeletePHIs

cl::opt< bool > DisableDeletePHIs("disable-cgp-delete-phis", cl::Hidden, cl::init(false), cl::desc("Disable elimination of dead PHI nodes.")) ( "disable-cgp-delete-phis" , cl::Hidden , cl::init(false) , cl::desc("Disable elimination of dead PHI nodes.") ) static

DisableExtLdPromotion

cl::opt< bool > DisableExtLdPromotion("disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in " "CodeGenPrepare")) ( "disable-cgp-ext-ld-promotion" , cl::Hidden , cl::init(false) , cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in " "CodeGenPrepare") ) static

DisableGCOpts

DisablePreheaderProtect

cl::opt< bool > DisablePreheaderProtect("disable-preheader-prot", cl::Hidden, cl::init(false), cl::desc("Disable protection against removing loop preheaders")) ( "disable-preheader-prot" , cl::Hidden , cl::init(false) , cl::desc("Disable protection against removing loop preheaders") ) static

DisableSelectToBranch

cl::opt< bool > DisableSelectToBranch("disable-cgp-select2branch", cl::Hidden, cl::init(false), cl::desc("Disable select to branch conversion.")) ( "disable-cgp-select2branch" , cl::Hidden , cl::init(false) , cl::desc("Disable select to branch conversion.") ) static

DisableStoreExtract

EnableAndCmpSinking

cl::opt< bool > EnableAndCmpSinking("enable-andcmp-sinking", cl::Hidden, cl::init(true), cl::desc("Enable sinking and/cmp into branches.")) ( "enable-andcmp-sinking" , cl::Hidden , cl::init(true) , cl::desc("Enable sinking and/cmp into branches.") ) static

EnableGEPOffsetSplit

cl::opt< bool > EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden, cl::init(true), cl::desc("Enable splitting large offset of GEP.")) ( "cgp-split-large-offset-gep" , cl::Hidden , cl::init(true) , cl::desc("Enable splitting large offset of GEP.") ) static

EnableICMP_EQToICMP_ST

cl::opt< bool > EnableICMP_EQToICMP_ST("cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false), cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion.")) ( "cgp-icmp-eq2icmp-st" , cl::Hidden , cl::init(false) , cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion.") ) static

EnableTypePromotionMerge

cl::opt< bool > EnableTypePromotionMerge("cgp-type-promotion-merge", cl::Hidden, cl::desc("Enable merging of redundant sexts when one is dominating" " the other."), cl::init(true)) ( "cgp-type-promotion-merge" , cl::Hidden , cl::desc("Enable merging of redundant sexts when one is dominating" " the other.") , cl::init(true) ) static

false

ForceSplitStore

cl::opt< bool > ForceSplitStore("force-split-store", cl::Hidden, cl::init(false), cl::desc("Force store splitting no matter what the target query says.")) ( "force-split-store" , cl::Hidden , cl::init(false) , cl::desc("Force store splitting no matter what the target query says.") ) static

FreqRatioToSkipMerge

cl::opt< uint64_t > FreqRatioToSkipMerge("cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2), cl::desc("Skip merging empty blocks if (frequency of empty block) / " "(frequency of destination block) is greater than this ratio")) ( "cgp-freq-ratio-to-skip-merge" , cl::Hidden , cl::init(2) , cl::desc("Skip merging empty blocks if (frequency of empty block) / " "(frequency of destination block) is greater than this ratio") ) static

generation

Optimize for code generation

HugeFuncThresholdInCGPP

cl::opt< unsigned > HugeFuncThresholdInCGPP("cgpp-huge-func", cl::init(10000), cl::Hidden, cl::desc("Least BB number of huge function.")) ( "cgpp-huge-func" , cl::init(10000) , cl::Hidden , cl::desc("Least BB number of huge function.") ) static

MaxAddressUsersToScan

cl::opt< unsigned > MaxAddressUsersToScan("cgp-max-address-users-to-scan", cl::init(100), cl::Hidden, cl::desc("Max number of address users to look at")) ( "cgp-max-address-users-to-scan" , cl::init(100) , cl::Hidden , cl::desc("Max number of address users to look at") ) static

OptimizePhiTypes

cl::opt< bool > OptimizePhiTypes("cgp-optimize-phi-types", cl::Hidden, cl::init(true), cl::desc("Enable converting phi types in CodeGenPrepare")) ( "cgp-optimize-phi-types" , cl::Hidden , cl::init(true) , cl::desc("Enable converting phi types in CodeGenPrepare") ) static

ProfileGuidedSectionPrefix

cl::opt< bool > ProfileGuidedSectionPrefix("profile-guided-section-prefix", cl::Hidden, cl::init(true), cl::desc("Use profile info to add section prefix for hot/cold functions")) ( "profile-guided-section-prefix" , cl::Hidden , cl::init(true) , cl::desc("Use profile info to add section prefix for hot/cold functions") ) static

ProfileUnknownInSpecialSection

cl::opt< bool > ProfileUnknownInSpecialSection("profile-unknown-in-special-section", cl::Hidden, cl::desc("In profiling mode like sampleFDO, if a function doesn't have " "profile, we cannot tell the function is cold for sure because " "it may be a function newly added without ever being sampled. " "With the flag enabled, compiler can put such profile unknown " "functions into a special section, so runtime system can choose " "to handle it in a different way than .text section, to save " "RAM for example. ")) ( "profile-unknown-in-special-section" , cl::Hidden , cl::desc("In profiling mode like sampleFDO, if a function doesn't have " "profile, we cannot tell the function is cold for sure because " "it may be a function newly added without ever being sampled. " "With the flag enabled, compiler can put such profile unknown " "functions into a special section, so runtime system can choose " "to handle it in a different way than .text section, to save " "RAM for example. ") ) static

StressExtLdPromotion

cl::opt< bool > StressExtLdPromotion("stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) " "optimization in CodeGenPrepare")) ( "stress-cgp-ext-ld-promotion" , cl::Hidden , cl::init(false) , cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) " "optimization in CodeGenPrepare") ) static

StressStoreExtract

VerifyBFIUpdates

cl::opt< bool > VerifyBFIUpdates("cgp-verify-bfi-updates", cl::Hidden, cl::init(false), cl::desc("Enable BFI update verification for " "CodeGenPrepare.")) ( "cgp-verify-bfi-updates" , cl::Hidden , cl::init(false) , cl::desc("Enable BFI update verification for " "CodeGenPrepare.") ) static