LLVM: lib/Target/X86/X86CmovConversion.cpp Source File
//===----- X86CmovConversion.cpp - Convert Cmov to Branch ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements a pass that converts X86 cmov instructions into
/// branches when profitable. The pass is conservative: it transforms if and
/// only if it can guarantee a gain with high confidence.
///
/// In outline, the pass:
///   1. Considers as candidates only CMOVs in innermost loops, on the
///      assumption that most hotspots sit in such loops.
///   2. Groups CMOV instructions that use the same EFLAGS definition, and
///      keeps a group only when all members share the same condition code or
///      its opposite, so that a single conditional jump can cover the group.
///   3. Converts only loops and CMOV groups for which the depth-based cost
///      model below predicts a gain above the configured threshold,
///      accounting for the branch-misprediction penalty.
///
/// Note: This pass is assumed to run on SSA machine code.
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/CGPassBuilderOption.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "x86-cmov-conversion"

STATISTIC(NumOfSkippedCmovGroups, "Number of unsupported CMOV-groups");
STATISTIC(NumOfCmovGroupCandidate, "Number of CMOV-group candidates");
STATISTIC(NumOfLoopCandidate, "Number of CMOV-conversion profitable loops");
STATISTIC(NumOfOptimizedCmovGroups, "Number of optimized CMOV-groups");

static cl::opt<bool> EnableCmovConverter(
    "x86-cmov-converter",
    cl::desc("Enable the X86 cmov-to-branch optimization."), cl::init(true),
    cl::Hidden);

static cl::opt<unsigned> GainCycleThreshold(
    "x86-cmov-converter-threshold",
    cl::desc("Minimum gain per loop (in cycles) threshold."), cl::init(4),
    cl::Hidden);

static cl::opt<bool> ForceMemOperand(
    "x86-cmov-converter-force-mem-operand",
    cl::desc("Convert cmovs to branches whenever they have memory operands."),
    cl::init(true), cl::Hidden);

static cl::opt<bool> ForceAll(
    "x86-cmov-converter-force-all",
    cl::desc("Convert all cmovs to branches."), cl::init(false), cl::Hidden);
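
// These flags are ordinary cl::opt options, so they can be toggled from the
// llc command line when experimenting with the pass. A hypothetical
// invocation (file names are placeholders):
//
//   llc -x86-cmov-converter-force-all=true foo.ll -o foo.s
//
// forces every convertible CMOV group in foo.ll into a branch diamond,
// bypassing the profitability checks implemented below.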
namespace {

/// Converts X86 cmov instructions into branches when profitable.
class X86CmovConverterPass : public MachineFunctionPass {
public:
  X86CmovConverterPass() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return "X86 cmov Conversion"; }
  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Pass identification, replacement for typeid.
  static char ID;

private:
  MachineRegisterInfo *MRI = nullptr;
  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  MachineLoopInfo *MLI = nullptr;
  TargetSchedModel TSchedModel;

  /// List of consecutive CMOV instructions.
  using CmovGroup = SmallVector<MachineInstr *, 2>;
  using CmovGroups = SmallVector<CmovGroup, 2>;
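
  // Illustrative example (pseudo machine IR; register and value names are
  // hypothetical): the two CMOVs below consume the same EFLAGS definition and
  // use the same condition code, so they form a single CmovGroup:
  //
  //   CMP32rr %a, %b, implicit-def $eflags
  //   %v1 = CMOV32rr %t1, %f1, 15 /*COND_G*/, implicit $eflags
  //   %v2 = CMOV32rr %t2, %f2, 15 /*COND_G*/, implicit $eflags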

  /// Collect all CMOV-group-candidates in \p Blocks and update
  /// \p CmovInstGroups accordingly.
  ///
  /// \param Blocks List of blocks to process.
  /// \param CmovInstGroups List of consecutive CMOV instruction groups found.
  /// \param IncludeLoads Whether to also collect CMOVs with a memory operand.
  /// \returns true iff it found any CMOV-group-candidate.
  bool collectCmovCandidates(ArrayRef<MachineBasicBlock *> Blocks,
                             CmovGroups &CmovInstGroups,
                             bool IncludeLoads = false);

  /// Check whether it is profitable to transform each CMOV-group-candidate,
  /// and remove the unprofitable ones from \p CmovInstGroups.
  ///
  /// \returns true iff any CMOV-group-candidate remains.
  bool checkForProfitableCmovCandidates(ArrayRef<MachineBasicBlock *> Blocks,
                                        CmovGroups &CmovInstGroups);

  /// Convert the given list of consecutive CMOV instructions into a branch.
  ///
  /// \param Group Consecutive CMOV instructions to be converted.
  void convertCmovInstsToBranches(SmallVectorImpl<MachineInstr *> &Group) const;
};

} // end anonymous namespace

char X86CmovConverterPass::ID = 0;

void X86CmovConverterPass::getAnalysisUsage(AnalysisUsage &AU) const {
  MachineFunctionPass::getAnalysisUsage(AU);
  AU.addRequired<MachineLoopInfoWrapperPass>();
}

bool X86CmovConverterPass::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;
  if (!EnableCmovConverter)
    return false;

  // If the SelectOptimize pass is enabled, cmovs have already been optimized.
  if (!getCGPassBuilderOption().DisableSelectOptimize)
    return false;

  LLVM_DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
                    << "**********\n");

  bool Changed = false;
  MLI = &getAnalysis<MachineLoopInfoWrapperPass>().getLI();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  MRI = &MF.getRegInfo();
  TII = STI.getInstrInfo();
  TRI = STI.getRegisterInfo();
  TSchedModel.init(&STI);

  // Before handling the more subtle cases of register-register CMOVs inside
  // potentially hot loops, quickly remove all CMOVs (ForceAll) or the ones
  // with a memory operand (ForceMemOperand). The latter kind risks a stall
  // waiting for the load to complete; speculative execution behind a branch
  // is better suited to hide that latency on modern x86.
  if (ForceMemOperand || ForceAll) {
    CmovGroups AllCmovGroups;
    SmallVector<MachineBasicBlock *, 4> Blocks(make_pointer_range(MF));
    if (collectCmovCandidates(Blocks, AllCmovGroups, /*IncludeLoads*/ true)) {
      for (auto &Group : AllCmovGroups) {
        // Skip any group that doesn't do at least one memory operand cmov.
        if (!ForceAll &&
            llvm::none_of(Group, [&](MachineInstr *I) { return I->mayLoad(); }))
          continue;

        // For CMOV groups which we can rewrite and which contain a memory
        // load, always rewrite them. On x86, a CMOV will dramatically amplify
        // any memory latency by blocking speculative execution.
        Changed = true;
        convertCmovInstsToBranches(Group);
      }
    }
    // Early return as ForceAll converts all CmovGroups.
    if (ForceAll)
      return Changed;
  }

  //===--------------------------------------------------------------------===//
  // Register-operand conversion algorithm
  // -------------------------------------
  //   For each innermost loop:
  //     collectCmovCandidates():
  //       Find all CMOV-group-candidates.
  //
  //     checkForProfitableCmovCandidates():
  //       * Calculate both loop-depth and optimized-loop-depth.
  //       * Use these depths to check loop-transformation profitability.
  //       * Check each CMOV-group-candidate for transformation profitability.
  //
  //     For each profitable CMOV-group-candidate:
  //       convertCmovInstsToBranches():
  //         * Create FalseMBB, SinkMBB and a conditional branch to SinkMBB.
  //         * Replace each CMOV instruction with a PHI instruction in SinkMBB.
  //
  // Note: For more details, see each function description.
  //===--------------------------------------------------------------------===//

  // Gather all loops; the size is re-checked on each iteration because child
  // loops are appended as we go.
  SmallVector<MachineLoop *, 4> Loops(MLI->begin(), MLI->end());
  for (int i = 0; i < (int)Loops.size(); ++i)
    llvm::append_range(Loops, Loops[i]->getSubLoops());

  for (MachineLoop *CurrLoop : Loops) {
    // Optimize only innermost loops.
    if (!CurrLoop->getSubLoops().empty())
      continue;

    // List of consecutive CMOV instructions to be processed.
    CmovGroups CmovInstGroups;

    if (!collectCmovCandidates(CurrLoop->getBlocks(), CmovInstGroups))
      continue;

    if (!checkForProfitableCmovCandidates(CurrLoop->getBlocks(),
                                          CmovInstGroups))
      continue;

    Changed = true;
    for (auto &Group : CmovInstGroups)
      convertCmovInstsToBranches(Group);
  }

  return Changed;
}
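
// A C-level pattern that typically produces a CMOV inside an innermost loop,
// and is therefore the kind of code this pass inspects (illustrative only):
//
//   for (int i = 0; i < n; ++i)
//     m = a[i] > m ? a[i] : m;   // max-reduction; usually compiles to CMOVG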

bool X86CmovConverterPass::collectCmovCandidates(
    ArrayRef<MachineBasicBlock *> Blocks, CmovGroups &CmovInstGroups,
    bool IncludeLoads) {
  //===--------------------------------------------------------------------===//
  // Collect all CMOV-group-candidates.
  //
  // CMOV-group:
  //   CMOV instructions, in the same MBB, that use the same EFLAGS def
  //   instruction.
  //
  // CMOV-group-candidate:
  //   CMOV-group where all the CMOV instructions are
  //     1. consecutive.
  //     2. have the same condition code or the opposite one.
  //     3. have only register operands (X86::CMOVrr).
  //===--------------------------------------------------------------------===//
  // List of possible improvements (TODOs):
  // --------------------------------------
  //   TODO: Add support for X86::CMOVrm instructions.
  //   TODO: Add support for coupled conditions (e.g. COND_S, COND_NS).
  //   TODO: Add support for CMOV-groups with non-consecutive CMOVs.
  //===--------------------------------------------------------------------===//

  // Current processed CMOV-Group.
  CmovGroup Group;
  for (auto *MBB : Blocks) {
    Group.clear();
    // Condition code of the first CMOV instruction in the currently processed
    // range, its opposite condition code, and the condition code of any
    // memory-operand CMOV seen so far.
    X86::CondCode FirstCC = X86::COND_INVALID, FirstOppCC = X86::COND_INVALID,
                  MemOpCC = X86::COND_INVALID;
    // Indicator of a non-CMOVrr instruction in the currently processed range.
    bool FoundNonCMOVInst = false;
    // Indicator for the currently processed CMOV-group: should it be skipped?
    bool SkipGroup = false;

    for (auto &I : *MBB) {
      // Skip debug instructions.
      if (I.isDebugInstr())
        continue;

      X86::CondCode CC = X86::getCondFromCMov(I);
      // Check if we found an X86::CMOVrr instruction.
      if (CC != X86::COND_INVALID &&
          !I.getFlag(MachineInstr::MIFlag::Unpredictable) &&
          (IncludeLoads || !I.mayLoad())) {
        if (Group.empty()) {
          // We found the first CMOV in the range: reset the flags.
          FirstCC = CC;
          FirstOppCC = X86::GetOppositeBranchCondition(CC);
          // Clear out the prior group's memory-operand condition code.
          MemOpCC = X86::COND_INVALID;
          FoundNonCMOVInst = false;
          SkipGroup = false;
        }
        Group.push_back(&I);
        // Check if it is a non-consecutive CMOV instruction or has a
        // different condition code than FirstCC or FirstOppCC.
        if (FoundNonCMOVInst || (CC != FirstCC && CC != FirstOppCC))
          // Mark the group to be skipped.
          SkipGroup = true;
        if (I.mayLoad()) {
          if (MemOpCC == X86::COND_INVALID)
            // The first memory-operand CMOV.
            MemOpCC = CC;
          else if (CC != MemOpCC)
            // Can't handle mixed conditions with memory operands.
            SkipGroup = true;
        }
        // Check if we were relying on zero-extending behavior of the CMOV.
        if (!SkipGroup &&
            llvm::any_of(
                MRI->use_nodbg_instructions(I.defs().begin()->getReg()),
                [&](MachineInstr &UseI) {
                  return UseI.getOpcode() == X86::SUBREG_TO_REG;
                }))
          // FIXME: We should model the cost of using an explicit MOV to
          // handle the zero-extension rather than just refusing to handle
          // this case.
          SkipGroup = true;
        continue;
      }

      // If Group is empty, keep looking for the first CMOV in the range.
      if (Group.empty())
        continue;

      // We found a non-CMOVrr instruction.
      FoundNonCMOVInst = true;
      // Check if this instruction defines EFLAGS; that ends the processed
      // range, as no later instruction can use the current EFLAGS def.
      if (I.definesRegister(X86::EFLAGS, /*TRI=*/nullptr)) {
        // Check whether the current processed CMOV-group should not be
        // skipped, and if so add it as a CMOV-group-candidate.
        if (!SkipGroup)
          CmovInstGroups.push_back(Group);
        else
          ++NumOfSkippedCmovGroups;
        Group.clear();
      }
    }
    // The end of the basic block is considered the end of the range; check
    // the last group as well.
    if (Group.empty())
      continue;
    if (!SkipGroup)
      CmovInstGroups.push_back(Group);
    else
      ++NumOfSkippedCmovGroups;
  }

  NumOfCmovGroupCandidate += CmovInstGroups.size();
  return !CmovInstGroups.empty();
}
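
// For example (pseudo machine IR, names hypothetical), a range that mixes
// COND_G with an unrelated COND_B under the same EFLAGS definition is still
// collected, but marked SkipGroup, because one conditional jump cannot cover
// both conditions:
//
//   CMP32rr %a, %b, implicit-def $eflags
//   %v1 = CMOV32rr %t1, %f1, 15 /*COND_G*/, implicit $eflags
//   %v2 = CMOV32rr %t2, %f2, 2  /*COND_B*/, implicit $eflags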

/// \returns Depth of an optimized (i.e. converted) cmov instruction.
static unsigned getDepthOfOptCmov(unsigned TrueOpDepth, unsigned FalseOpDepth) {
  // The depth of the result after branch conversion is
  //   TrueOpDepth * TrueOpProbability + FalseOpDepth * FalseOpProbability.
  // As there is no information about branch weights, assume a 75%/25% split
  // either way and take the more pessimistic of the two estimates.
  return std::max(
      divideCeil(TrueOpDepth * 3 + FalseOpDepth, 4),
      divideCeil(FalseOpDepth * 3 + TrueOpDepth, 4));
}
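
// Worked example: with TrueOpDepth = 2 and FalseOpDepth = 6,
//   max(ceil((2*3 + 6) / 4), ceil((6*3 + 2) / 4)) = max(3, 5) = 5,
// i.e. the estimate is dominated by the deeper operand, yet still discounted
// relative to always paying the full depth of 6.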

bool X86CmovConverterPass::checkForProfitableCmovCandidates(
    ArrayRef<MachineBasicBlock *> Blocks, CmovGroups &CmovInstGroups) {
  struct DepthInfo {
    /// Depth of original loop.
    unsigned Depth;
    /// Depth of optimized loop.
    unsigned OptDepth;
  };
  /// Number of symbolic loop iterations used to calculate the depth.
  static const unsigned LoopIterations = 2;
  DenseMap<MachineInstr *, DepthInfo> DepthMap;
  DepthInfo LoopDepth[LoopIterations] = {{0, 0}, {0, 0}};
  enum { PhyRegType = 0, VirRegType = 1, RegTypeNum = 2 };
  /// For each register type, map the register to its last def instruction.
  DenseMap<Register, MachineInstr *> RegDefMaps[RegTypeNum];
  /// Map a register operand to its def instruction, which can be nullptr if
  /// it is unknown (e.g., the operand is defined outside the loop).
  DenseMap<MachineOperand *, MachineInstr *> OperandToDefMap;

  // Set the depth of an unknown instruction (i.e., nullptr) to zero.
  DepthMap[nullptr] = {0, 0};

  SmallPtrSet<MachineInstr *, 4> CmovInstructions;
  for (auto &Group : CmovInstGroups)
    CmovInstructions.insert_range(Group);

  //===--------------------------------------------------------------------===//
  // Step 1: Calculate instruction depth and loop depth.
  //
  // Optimized-Loop:
  //   The loop with the CMOV-group-candidates converted into branches.
  //
  // Instruction-Depth:
  //   instruction latency + max operand depth.
  //   * For a CMOV instruction in the optimized loop the depth is calculated
  //     as max(true-operand-value-depth, false-operand-value-depth), since
  //     after conversion only one side is actually executed.
  //
  // Loop-Depth:
  //   max instruction depth over all instructions in the loop.
  //   Note: the instruction with max depth represents the loop latency and
  //         thus bounds the loop performance.
  //===--------------------------------------------------------------------===//
  for (DepthInfo &MaxDepth : LoopDepth) {
    for (auto *MBB : Blocks) {
      // Clear the physical-register def map between blocks.
      RegDefMaps[PhyRegType].clear();
      for (MachineInstr &MI : *MBB) {
        // Skip debug instructions.
        if (MI.isDebugInstr())
          continue;
        unsigned MIDepth = 0;
        unsigned MIDepthOpt = 0;
        bool IsCMOV = CmovInstructions.count(&MI);
        for (auto &MO : MI.uses()) {
          // Check for "isUse()" as "uses()" also returns implicit defs.
          if (!MO.isReg() || !MO.isUse())
            continue;
          Register Reg = MO.getReg();
          auto &RDM = RegDefMaps[(unsigned)Reg.isVirtual()];
          if (MachineInstr *DefMI = RDM.lookup(Reg)) {
            OperandToDefMap[&MO] = DefMI;
            DepthInfo Info = DepthMap.lookup(DefMI);
            MIDepth = std::max(MIDepth, Info.Depth);
            if (!IsCMOV)
              MIDepthOpt = std::max(MIDepthOpt, Info.OptDepth);
          }
        }

        if (IsCMOV)
          MIDepthOpt = getDepthOfOptCmov(
              DepthMap[OperandToDefMap.lookup(&MI.getOperand(1))].OptDepth,
              DepthMap[OperandToDefMap.lookup(&MI.getOperand(2))].OptDepth);

        // Update the def maps so later instructions see this def.
        for (auto &MO : MI.operands()) {
          if (!MO.isReg() || !MO.isDef())
            continue;
          Register Reg = MO.getReg();
          RegDefMaps[(unsigned)Reg.isVirtual()][Reg] = &MI;
        }

        unsigned Latency = TSchedModel.computeInstrLatency(&MI);
        DepthMap[&MI] = {MIDepth += Latency, MIDepthOpt += Latency};
        MaxDepth.Depth = std::max(MaxDepth.Depth, MIDepth);
        MaxDepth.OptDepth = std::max(MaxDepth.OptDepth, MIDepthOpt);
      }
    }
  }

  unsigned Diff[LoopIterations] = {LoopDepth[0].Depth - LoopDepth[0].OptDepth,
                                   LoopDepth[1].Depth - LoopDepth[1].OptDepth};

  //===--------------------------------------------------------------------===//
  // Step 2: Check if the loop is worth optimizing.
  //
  // Worth-Optimize-Loop:
  //   Case 1: Diff[1] == Diff[0]
  //     The critical path is iteration independent: there is no dependency
  //     of critical-path instructions on critical-path instructions of the
  //     previous iteration. Thus it is enough to check the gain percent of
  //     the first iteration: to be conservative, the optimized loop needs to
  //     have a depth at least 12.5% smaller than the original loop, per
  //     iteration.
  //
  //   Case 2: Diff[1] > Diff[0]
  //     The critical path is iteration dependent: critical-path instructions
  //     depend on critical-path instructions of the previous iteration. Check
  //     the gain percent of the second iteration (similar to the first case),
  //     and additionally require the gain to be growing: the change in gain
  //     between the two iterations must be at least half the change in loop
  //     depth, or the loop is not considered profitable.
  //
  //   Case 3: Diff[1] < Diff[0]
  //     The gain is shrinking across iterations, so the loop is rejected
  //     (WorthOptLoop stays false).
  //===--------------------------------------------------------------------===//

  // If the gain is below the threshold, the loop is not worth optimizing;
  // drop all CMOV-group-candidates.
  if (Diff[1] < GainCycleThreshold)
    return false;

  bool WorthOptLoop = false;
  if (Diff[1] == Diff[0])
    WorthOptLoop = Diff[0] * 8 >= LoopDepth[0].Depth;
  else if (Diff[1] > Diff[0])
    WorthOptLoop =
        (Diff[1] - Diff[0]) * 2 >= (LoopDepth[1].Depth - LoopDepth[0].Depth) &&
        (Diff[1] * 8 >= LoopDepth[1].Depth);

  if (!WorthOptLoop)
    return false;
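
  // Worked example (hypothetical depths): suppose the two symbolic iterations
  // yield LoopDepth[0] = {Depth: 16, OptDepth: 12} and
  // LoopDepth[1] = {Depth: 22, OptDepth: 14}, so Diff = {4, 8}.
  // Diff[1] = 8 passes the default GainCycleThreshold of 4, and since
  // Diff[1] > Diff[0]:
  //   (8 - 4) * 2 = 8 >= 22 - 16 = 6   // gain is growing fast enough
  //   8 * 8 = 64      >= 22            // at least 12.5% gain per iteration
  // so the loop is considered worth optimizing.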

  ++NumOfLoopCandidate;

  //===--------------------------------------------------------------------===//
  // Step 3: Check for each CMOV-group-candidate whether it is worth
  //         optimizing.
  //
  // Worth-Optimize-Group:
  //   Iff it is worth optimizing all CMOV instructions in the group.
  //
  // Worth-Optimize-CMOV:
  //   A predicted branch is faster than the CMOV: the cost of the CMOV's
  //   condition must exceed the combined cost of its true/false values by at
  //   least 25% of the branch-misprediction penalty. This ensures no
  //   degradation even with a 25% misprediction rate.
  //===--------------------------------------------------------------------===//

  unsigned MispredictPenalty = TSchedModel.getMCSchedModel()->MispredictPenalty;
  CmovGroups TempGroups;
  std::swap(TempGroups, CmovInstGroups);
  for (auto &Group : TempGroups) {
    bool WorthOpGroup = true;
    for (auto *MI : Group) {
      // Avoid CMOV instructions whose value is used as a pointer to load
      // from. This is a conservative check to avoid converting CMOVs used in
      // tree-search-like algorithms, where the branch is unpredictable.
      auto UIs = MRI->use_instructions(MI->defs().begin()->getReg());
      if (hasSingleElement(UIs)) {
        unsigned Op = UIs.begin()->getOpcode();
        if (Op == X86::MOV64rm || Op == X86::MOV32rm) {
          WorthOpGroup = false;
          break;
        }
      }

      unsigned CondCost =
          DepthMap[OperandToDefMap.lookup(&MI->getOperand(4))].Depth;
      unsigned ValCost = getDepthOfOptCmov(
          DepthMap[OperandToDefMap.lookup(&MI->getOperand(1))].Depth,
          DepthMap[OperandToDefMap.lookup(&MI->getOperand(2))].Depth);
      if (ValCost > CondCost || (CondCost - ValCost) * 4 < MispredictPenalty) {
        WorthOpGroup = false;
        break;
      }
    }

    if (WorthOpGroup)
      CmovInstGroups.push_back(Group);
  }

  return !CmovInstGroups.empty();
}
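
// For instance, with MispredictPenalty = 20 cycles, a CMOV whose condition
// depth (CondCost) is 10 and whose combined value depth (ValCost, computed by
// getDepthOfOptCmov) is 4 satisfies (10 - 4) * 4 = 24 >= 20, so conversion is
// considered profitable; with CondCost = 8 instead, (8 - 4) * 4 = 16 < 20 and
// the group would be kept as CMOV.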

static bool checkEFLAGSLive(MachineInstr *MI) {
  if (MI->killsRegister(X86::EFLAGS, /*TRI=*/nullptr))
    return false;

  // The EFLAGS operand of MI might be missing a kill marker.
  // Figure out whether EFLAGS should be live after MI.
  MachineBasicBlock *BB = MI->getParent();
  MachineBasicBlock::iterator ItrMI = MI;

  // Scan forward through the block for the first use or def of EFLAGS.
  for (auto I = std::next(ItrMI), E = BB->end(); I != E; ++I) {
    if (I->readsRegister(X86::EFLAGS, /*TRI=*/nullptr))
      return true;
    if (I->definesRegister(X86::EFLAGS, /*TRI=*/nullptr))
      return false;
  }

  // We hit the end of the block: check whether EFLAGS is live into a
  // successor.
  for (auto *Succ : BB->successors())
    if (Succ->isLiveIn(X86::EFLAGS))
      return true;

  return false;
}
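
// Example (instruction sequence illustrative): in "CMP; CMOV; JA", the JA
// still reads EFLAGS after the CMOV group, so checkEFLAGSLive() returns true
// and the new FalseMBB/SinkMBB blocks created below must list EFLAGS as a
// live-in to keep the machine verifier happy.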

/// Given \p First CMOV instruction and \p Last CMOV instruction representing a
/// group of CMOV instructions consecutive with each other, make sure the group
/// stays packed before conversion: hoist any debug instructions found between
/// them to after \p Last.
static void packCmovGroup(MachineInstr *First, MachineInstr *Last) {
  assert(X86::getCondFromCMov(*Last) != X86::COND_INVALID &&
         "Last instruction in a CMOV group must be a CMOV instruction");

  SmallVector<MachineInstr *, 2> DBGInstructions;
  for (auto I = First->getIterator(), E = Last->getIterator(); I != E; I++) {
    if (I->isDebugInstr())
      DBGInstructions.push_back(&*I);
  }

  // Splice the debug instructions to after the CMOV group.
  MachineBasicBlock *MBB = First->getParent();
  for (auto *MI : DBGInstructions)
    MBB->insertAfter(Last, MI->removeFromParent());
}

void X86CmovConverterPass::convertCmovInstsToBranches(
    SmallVectorImpl<MachineInstr *> &Group) const {
  assert(!Group.empty() && "No CMOV instructions to convert");
  ++NumOfOptimizedCmovGroups;

  // If the CMOV group is not packed, e.g., there are debug instructions
  // between the first and last CMOV, pack the group so the CMOV instructions
  // are consecutive.
  packCmovGroup(Group.front(), Group.back());

  // To convert a CMOVcc instruction, we actually have to insert the diamond
  // control-flow pattern. The incoming instruction knows the destination
  // vreg to set, the condition code to branch on and the true/false values
  // to select between.
  //
  // Before:
  // -------
  // MBB:
  //   cond = cmp ...
  //   v1 = CMOVge t1, f1, cond
  //   v2 = CMOVlt t2, f2, cond
  //   v3 = CMOVge v1, f3, cond
  //
  // After:
  // ------
  // MBB:
  //   cond = cmp ...
  //   jge %SinkMBB
  //
  // FalseMBB:
  //   fallthrough
  //
  // SinkMBB:
  //   %v1 = phi[%f1, %FalseMBB], [%t1, %MBB]
  //   %v2 = phi[%t2, %FalseMBB], [%f2, %MBB] ; For the inverted condition
  //   %v3 = phi[%f3, %FalseMBB], [%v1, %MBB] ; Per the new definition of v1
  MachineInstr &MI = *Group.front();
  MachineInstr *LastCMOV = Group.back();
  DebugLoc DL = MI.getDebugLoc();

  X86::CondCode CC = X86::CondCode(X86::getCondFromCMov(MI));
  X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
  // Potentially swap the condition codes so that any memory operand to a CMOV
  // is in the *false* position instead of the *true* position. We can invert
  // any non-memory-operand CMOV instructions to cope with this, and we ensure
  // memory-operand CMOVs are only included with a single condition code.
  if (llvm::any_of(Group, [&](MachineInstr *I) {
        return I->mayLoad() && X86::getCondFromCMov(*I) == CC;
      }))
    std::swap(CC, OppCC);

  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction::iterator It = ++MBB->getIterator();
  MachineFunction *F = MBB->getParent();
  const BasicBlock *BB = MBB->getBasicBlock();

  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(BB);
  F->insert(It, FalseMBB);
  F->insert(It, SinkMBB);

  // If the EFLAGS register isn't dead in the terminator, then claim that it's
  // live into the sink and copy blocks.
  if (checkEFLAGSLive(LastCMOV)) {
    FalseMBB->addLiveIn(X86::EFLAGS);
    SinkMBB->addLiveIn(X86::EFLAGS);
  }

  // Transfer the remainder of MBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(LastCMOV)), MBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // Add the false and sink blocks as successors, and create the conditional
  // branch to the sink block.
  MBB->addSuccessor(FalseMBB);
  MBB->addSuccessor(SinkMBB);
  BuildMI(MBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(CC);

  // Add the sink block to the false block successors.
  FalseMBB->addSuccessor(SinkMBB);

  MachineInstrBuilder MIB;
  MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
  MachineBasicBlock::iterator MIItEnd =
      std::next(MachineBasicBlock::iterator(LastCMOV));
  MachineBasicBlock::iterator FalseInsertionPoint = FalseMBB->begin();
  MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();

  // First we need to insert an explicit load on the false path for any memory
  // operand. We also need to potentially do register rewriting here, but we
  // simply don't do that yet as we don't support that complex a CMOV.
  DenseMap<Register, Register> FalseBBRegRewriteTable;
  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd;) {
    auto &MI = *MIIt++;
    // Skip any CMOVs in this group which don't load from memory.
    if (!MI.mayLoad()) {
      // Remember the false-side register input.
      Register FalseReg =
          MI.getOperand(X86::getCondFromCMov(MI) == CC ? 1 : 2).getReg();
      // Walk back through any intermediate cmovs referenced.
      while (true) {
        auto FRIt = FalseBBRegRewriteTable.find(FalseReg);
        if (FRIt == FalseBBRegRewriteTable.end())
          break;
        FalseReg = FRIt->second;
      }
      FalseBBRegRewriteTable[MI.getOperand(0).getReg()] = FalseReg;
      continue;
    }

    // The condition must be the *opposite* of the one we've selected a branch
    // for, as the branch will go *around* the load and the load should happen
    // when the CMOV condition is false.
    assert(X86::getCondFromCMov(MI) == OppCC &&
           "Can only handle memory-operand cmov instructions with a condition "
           "opposite to the selected branch direction.");

    // The goal is to rewrite the cmov from:
    //
    //   MBB:
    //     %A = CMOVcc %B (tied), (mem)
    //
    // to
    //
    //   MBB:
    //     %A = CMOVcc %B (tied), %C
    //   FalseMBB:
    //     %C = MOV (mem)
    //
    // which will allow the next loop to rewrite the CMOV in terms of a PHI:
    //
    //   MBB:
    //     JMP!cc SinkMBB
    //   FalseMBB:
    //     %C = MOV (mem)
    //   SinkMBB:
    //     %A = PHI [ %C, FalseMBB ], [ %B, MBB]

    // Get a fresh register to use as the destination of the MOV.
    const TargetRegisterClass *RC = MRI->getRegClass(MI.getOperand(0).getReg());
    Register TmpReg = MRI->createVirtualRegister(RC);

    // Retain the debug instruction number of the CMOV being unfolded.
    unsigned OldDebugInstrNum = MI.peekDebugInstrNum();
    SmallVector<MachineInstr *, 4> NewMIs;
    bool Unfolded = TII->unfoldMemoryOperand(*MBB->getParent(), MI, TmpReg,
                                             /*UnfoldLoad*/ true,
                                             /*UnfoldStore*/ false, NewMIs);
    (void)Unfolded;
    assert(Unfolded && "Should never fail to unfold a loading cmov!");

    // Move the new CMOV to just before the old one and reset any impacted
    // iterator.
    auto *NewCMOV = NewMIs.pop_back_val();
    assert(X86::getCondFromCMov(*NewCMOV) == OppCC &&
           "Last new instruction isn't the expected CMOV!");
    LLVM_DEBUG(dbgs() << "\tRewritten cmov: "; NewCMOV->dump());
    MBB->insert(MachineBasicBlock::iterator(MI), NewCMOV);
    if (&*MIItBegin == &MI)
      MIItBegin = MachineBasicBlock::iterator(NewCMOV);

    if (OldDebugInstrNum)
      NewCMOV->setDebugInstrNum(OldDebugInstrNum);

    // Sink whatever instructions were needed to produce the unfolded operand
    // into the false block.
    for (auto *NewMI : NewMIs) {
      LLVM_DEBUG(dbgs() << "\tRewritten load instr: "; NewMI->dump());
      FalseMBB->insert(FalseInsertionPoint, NewMI);
      // Re-map any operands that come from other CMOVs in this group to the
      // inputs for this block.
      for (auto &MOp : NewMI->uses()) {
        if (!MOp.isReg())
          continue;
        auto It = FalseBBRegRewriteTable.find(MOp.getReg());
        if (It == FalseBBRegRewriteTable.end())
          continue;

        MOp.setReg(It->second);
        // This might have been a kill when it referenced the CMOV result, but
        // it won't necessarily be once rewritten.
        // FIXME: We could potentially improve this by tracking whether the
        // operand to the CMOV was also a kill, and then skipping the PHI node
        // construction below.
        MOp.setIsKill(false);
      }
    }
    MBB->erase(&MI);

    // Add this CMOV's result to the rewrite table.
    FalseBBRegRewriteTable[NewCMOV->getOperand(0).getReg()] = TmpReg;
  }

  // As we are creating the PHIs, we have to be careful if there is more than
  // one. Later CMOVs may reference the results of earlier CMOVs, but later
  // PHIs have to reference the individual true/false operands.
  DenseMap<Register, std::pair<Register, Register>> RegRewriteTable;
  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
    Register DestReg = MIIt->getOperand(0).getReg();
    Register Op1Reg = MIIt->getOperand(1).getReg();
    Register Op2Reg = MIIt->getOperand(2).getReg();

    // If this CMOV we are processing is the opposite condition from the jump
    // we generated, then we have to swap the operands for the PHI that is
    // going to be generated.
    if (X86::getCondFromCMov(*MIIt) == OppCC)
      std::swap(Op1Reg, Op2Reg);

    auto Op1Itr = RegRewriteTable.find(Op1Reg);
    if (Op1Itr != RegRewriteTable.end())
      Op1Reg = Op1Itr->second.first;

    auto Op2Itr = RegRewriteTable.find(Op2Reg);
    if (Op2Itr != RegRewriteTable.end())
      Op2Reg = Op2Itr->second.second;

    // SinkMBB:
    //   %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, MBB ]
    MIB = BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(X86::PHI), DestReg)
              .addReg(Op1Reg)
              .addMBB(FalseMBB)
              .addReg(Op2Reg)
              .addMBB(MBB);
    (void)MIB;
    LLVM_DEBUG(dbgs() << "\tFrom: "; MIIt->dump());
    LLVM_DEBUG(dbgs() << "\tTo: "; MIB->dump());

    // debug-info: we can just copy the instr-ref number from the CMOV to the
    // PHI, since this is a one-to-one substitution.
    if (unsigned InstrNum = MIIt->peekDebugInstrNum())
      MIB->setDebugInstrNum(InstrNum);

    // Add this PHI to the rewrite table.
    RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
  }

  // Reset the NoPHIs property if a PHI was inserted, to prevent a conflict
  // with that machine-function property.
  if (MIItBegin != MIItEnd)
    F->getProperties().resetNoPHIs();

  // Now remove the CMOV(s).
  MBB->erase(MIItBegin, MIItEnd);

  // Add the new basic blocks to MachineLoopInfo.
  if (MachineLoop *L = MLI->getLoopFor(MBB)) {
    L->addBasicBlockToLoop(FalseMBB, *MLI);
    L->addBasicBlockToLoop(SinkMBB, *MLI);
  }
}
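
// End-to-end illustration (hypothetical assembly, heavily simplified): the
// select
//
//   cmpl   %esi, %edi
//   cmovgl %edx, %eax            ; eax = (edi > esi) ? edx : eax
//
// is conceptually rewritten into the diamond
//
//   cmpl %esi, %edi
//   jg   .Lsink                  ; branch straight to the sink on 'greater'
//                                ; fallthrough = FalseMBB (empty here)
// .Lsink:
//   ; PHI [eax from FalseMBB, edx from MBB]; after register allocation this
//   ; typically lowers to a plain move on one of the two paths.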

INITIALIZE_PASS_BEGIN(X86CmovConverterPass, DEBUG_TYPE, "X86 cmov Conversion",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfoWrapperPass)
INITIALIZE_PASS_END(X86CmovConverterPass, DEBUG_TYPE, "X86 cmov Conversion",
                    false, false)

FunctionPass *llvm::createX86CmovConverterPass() {
  return new X86CmovConverterPass();
}