LLVM: lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp Source File
//===- X86AvoidStoreForwardingBlocks.cpp - Avoid HW Store Forward Block --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-Exception
//
//===----------------------------------------------------------------------===//
//
// If a load follows a store and reloads data that the store has written to
// memory, Intel microarchitectures can in many cases forward the data
// directly from the store to the load. This "store forwarding" saves cycles
// by enabling the load to obtain the data directly instead of accessing it
// from cache or memory.
// A "store forward block" occurs when a store cannot be forwarded to the
// load. The most typical case on Intel Core microarchitectures is a small
// store that cannot be forwarded to a large load. The estimated penalty for
// a store forward block is ~13 cycles.
//
// This pass tries to recognize and handle cases where a "store forward
// block" is created by the compiler when lowering memcpy calls to a sequence
// of a load and a store.
//
// The pass currently only handles cases where memcpy is lowered to XMM/YMM
// registers; it tries to break the memcpy into smaller copies. Breaking the
// memcpy should be possible since there is no atomicity guarantee for loads
// and stores to XMM/YMM.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <map>
#include <utility>
using namespace llvm;

#define DEBUG_TYPE "x86-avoid-SFB"

static cl::opt<bool> DisableX86AvoidStoreForwardBlocks(
    "x86-disable-avoid-SFB", cl::Hidden,
    cl::desc("X86: Disable Store Forwarding Blocks fixup."), cl::init(false));

static cl::opt<unsigned> X86AvoidSFBInspectionLimit(
    "x86-sfb-inspection-limit",
    cl::desc("X86: Number of instructions backward to "
             "inspect for store forwarding blocks."),
    cl::init(20), cl::Hidden);
namespace {

using DisplacementSizeMap = std::map<int64_t, unsigned>;

class X86AvoidSFBPass : public MachineFunctionPass {
public:
  static char ID;
  X86AvoidSFBPass() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override {
    return "X86 Avoid Store Forwarding Blocks";
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    MachineFunctionPass::getAnalysisUsage(AU);
    AU.addRequired<AAResultsWrapperPass>();
  }

private:
  MachineRegisterInfo *MRI = nullptr;
  const X86InstrInfo *TII = nullptr;
  const X86RegisterInfo *TRI = nullptr;
  SmallVector<std::pair<MachineInstr *, MachineInstr *>, 2>
      BlockedLoadsStoresPairs;
  SmallVector<MachineInstr *, 2> ForRemoval;
  AliasAnalysis *AA = nullptr;

  /// Returns couples of Load then Store to memory which look
  /// like a memcpy.
  void findPotentiallylBlockedCopies(MachineFunction &MF);

  /// Break the memcpy's load and store into smaller copies
  /// such that each copy isn't blocked by another copy.
  void breakBlockedCopies(MachineInstr *LoadInst, MachineInstr *StoreInst,
                          const DisplacementSizeMap &BlockingStoresDispSizeMap);

  void buildCopies(int Size, MachineInstr *LoadInst, int64_t LdDispImm,
                   MachineInstr *StoreInst, int64_t StDispImm,
                   int64_t LMMOffset, int64_t SMMOffset);

  void buildCopy(MachineInstr *LoadInst, unsigned NLoadOpcode, int64_t LoadDisp,
                 MachineInstr *StoreInst, unsigned NStoreOpcode,
                 int64_t StoreDisp, unsigned Size, int64_t LMMOffset,
                 int64_t SMMOffset);

  bool alias(const MachineMemOperand &Op1, const MachineMemOperand &Op2) const;

  unsigned getRegSizeInBytes(MachineInstr *Inst);
};

} // end anonymous namespace
char X86AvoidSFBPass::ID = 0;

INITIALIZE_PASS_BEGIN(X86AvoidSFBPass, DEBUG_TYPE, "Machine code sinking",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(X86AvoidSFBPass, DEBUG_TYPE, "Machine code sinking", false,
                    false)

FunctionPass *llvm::createX86AvoidStoreForwardingBlocks() {
  return new X86AvoidSFBPass();
}
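
// The classifiers below cover the plain (aligned or unaligned) XMM/YMM moves,
// including their AVX-512VL variants, that are typically emitted when a
// memcpy is lowered to vector registers; masked, non-temporal, and
// scatter/gather forms are not matched.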
static bool isXMMLoadOpcode(unsigned Opcode) {
  return Opcode == X86::MOVUPSrm || Opcode == X86::MOVAPSrm ||
         Opcode == X86::VMOVUPSrm || Opcode == X86::VMOVAPSrm ||
         Opcode == X86::VMOVUPDrm || Opcode == X86::VMOVAPDrm ||
         Opcode == X86::VMOVDQUrm || Opcode == X86::VMOVDQArm ||
         Opcode == X86::VMOVUPSZ128rm || Opcode == X86::VMOVAPSZ128rm ||
         Opcode == X86::VMOVUPDZ128rm || Opcode == X86::VMOVAPDZ128rm ||
         Opcode == X86::VMOVDQU64Z128rm || Opcode == X86::VMOVDQA64Z128rm ||
         Opcode == X86::VMOVDQU32Z128rm || Opcode == X86::VMOVDQA32Z128rm;
}
static bool isYMMLoadOpcode(unsigned Opcode) {
  return Opcode == X86::VMOVUPSYrm || Opcode == X86::VMOVAPSYrm ||
         Opcode == X86::VMOVUPDYrm || Opcode == X86::VMOVAPDYrm ||
         Opcode == X86::VMOVDQUYrm || Opcode == X86::VMOVDQAYrm ||
         Opcode == X86::VMOVUPSZ256rm || Opcode == X86::VMOVAPSZ256rm ||
         Opcode == X86::VMOVUPDZ256rm || Opcode == X86::VMOVAPDZ256rm ||
         Opcode == X86::VMOVDQU64Z256rm || Opcode == X86::VMOVDQA64Z256rm ||
         Opcode == X86::VMOVDQU32Z256rm || Opcode == X86::VMOVDQA32Z256rm;
}

static bool isPotentialBlockedMemCpyLd(unsigned Opcode) {
  return isXMMLoadOpcode(Opcode) || isYMMLoadOpcode(Opcode);
}
static bool isPotentialBlockedMemCpyPair(unsigned LdOpcode, unsigned StOpcode) {
  switch (LdOpcode) {
  case X86::MOVUPSrm:
  case X86::MOVAPSrm:
    return StOpcode == X86::MOVUPSmr || StOpcode == X86::MOVAPSmr;
  case X86::VMOVUPSrm:
  case X86::VMOVAPSrm:
    return StOpcode == X86::VMOVUPSmr || StOpcode == X86::VMOVAPSmr;
  case X86::VMOVUPDrm:
  case X86::VMOVAPDrm:
    return StOpcode == X86::VMOVUPDmr || StOpcode == X86::VMOVAPDmr;
  case X86::VMOVDQUrm:
  case X86::VMOVDQArm:
    return StOpcode == X86::VMOVDQUmr || StOpcode == X86::VMOVDQAmr;
  case X86::VMOVUPSZ128rm:
  case X86::VMOVAPSZ128rm:
    return StOpcode == X86::VMOVUPSZ128mr || StOpcode == X86::VMOVAPSZ128mr;
  case X86::VMOVUPDZ128rm:
  case X86::VMOVAPDZ128rm:
    return StOpcode == X86::VMOVUPDZ128mr || StOpcode == X86::VMOVAPDZ128mr;
  case X86::VMOVUPSYrm:
  case X86::VMOVAPSYrm:
    return StOpcode == X86::VMOVUPSYmr || StOpcode == X86::VMOVAPSYmr;
  case X86::VMOVUPDYrm:
  case X86::VMOVAPDYrm:
    return StOpcode == X86::VMOVUPDYmr || StOpcode == X86::VMOVAPDYmr;
  case X86::VMOVDQUYrm:
  case X86::VMOVDQAYrm:
    return StOpcode == X86::VMOVDQUYmr || StOpcode == X86::VMOVDQAYmr;
  case X86::VMOVUPSZ256rm:
  case X86::VMOVAPSZ256rm:
    return StOpcode == X86::VMOVUPSZ256mr || StOpcode == X86::VMOVAPSZ256mr;
  case X86::VMOVUPDZ256rm:
  case X86::VMOVAPDZ256rm:
    return StOpcode == X86::VMOVUPDZ256mr || StOpcode == X86::VMOVAPDZ256mr;
  case X86::VMOVDQU64Z128rm:
  case X86::VMOVDQA64Z128rm:
    return StOpcode == X86::VMOVDQU64Z128mr || StOpcode == X86::VMOVDQA64Z128mr;
  case X86::VMOVDQU32Z128rm:
  case X86::VMOVDQA32Z128rm:
    return StOpcode == X86::VMOVDQU32Z128mr || StOpcode == X86::VMOVDQA32Z128mr;
  case X86::VMOVDQU64Z256rm:
  case X86::VMOVDQA64Z256rm:
    return StOpcode == X86::VMOVDQU64Z256mr || StOpcode == X86::VMOVDQA64Z256mr;
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQA32Z256rm:
    return StOpcode == X86::VMOVDQU32Z256mr || StOpcode == X86::VMOVDQA32Z256mr;
  default:
    return false;
  }
}
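
// Note that a pair only qualifies when the store opcode matches the load's
// width and element type, e.g. VMOVUPSrm pairs with VMOVUPSmr or VMOVAPSmr
// but never with a YMM or integer-domain store.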

static bool isPotentialBlockingStoreInst(unsigned Opcode, unsigned LoadOpcode) {
  bool PBlock = false;
  PBlock |= Opcode == X86::MOV64mr || Opcode == X86::MOV64mi32 ||
            Opcode == X86::MOV32mr || Opcode == X86::MOV32mi ||
            Opcode == X86::MOV16mr || Opcode == X86::MOV16mi ||
            Opcode == X86::MOV8mr || Opcode == X86::MOV8mi;
  if (isYMMLoadOpcode(LoadOpcode))
    PBlock |= Opcode == X86::VMOVUPSmr || Opcode == X86::VMOVAPSmr ||
              Opcode == X86::VMOVUPDmr || Opcode == X86::VMOVAPDmr ||
              Opcode == X86::VMOVDQUmr || Opcode == X86::VMOVDQAmr ||
              Opcode == X86::VMOVUPSZ128mr || Opcode == X86::VMOVAPSZ128mr ||
              Opcode == X86::VMOVUPDZ128mr || Opcode == X86::VMOVAPDZ128mr ||
              Opcode == X86::VMOVDQU64Z128mr ||
              Opcode == X86::VMOVDQA64Z128mr ||
              Opcode == X86::VMOVDQU32Z128mr || Opcode == X86::VMOVDQA32Z128mr;
  return PBlock;
}
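
// Stores narrower than the load are what create the forwarding block: the
// CPU cannot forward a partial store-buffer entry, so the load stalls until
// the store commits (an estimated ~13 cycle penalty, per the file header).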

static const int MOV128SZ = 16;
static const int MOV64SZ = 8;
static const int MOV32SZ = 4;
static const int MOV16SZ = 2;
static const int MOV8SZ = 1;

static unsigned getYMMtoXMMLoadOpcode(unsigned LoadOpcode) {
  switch (LoadOpcode) {
  case X86::VMOVUPSYrm:
  case X86::VMOVAPSYrm:
    return X86::VMOVUPSrm;
  case X86::VMOVUPDYrm:
  case X86::VMOVAPDYrm:
    return X86::VMOVUPDrm;
  case X86::VMOVDQUYrm:
  case X86::VMOVDQAYrm:
    return X86::VMOVDQUrm;
  case X86::VMOVUPSZ256rm:
  case X86::VMOVAPSZ256rm:
    return X86::VMOVUPSZ128rm;
  case X86::VMOVUPDZ256rm:
  case X86::VMOVAPDZ256rm:
    return X86::VMOVUPDZ128rm;
  case X86::VMOVDQU64Z256rm:
  case X86::VMOVDQA64Z256rm:
    return X86::VMOVDQU64Z128rm;
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQA32Z256rm:
    return X86::VMOVDQU32Z128rm;
  default:
    llvm_unreachable("Unexpected Load Opcode");
  }
  return 0;
}

static unsigned getYMMtoXMMStoreOpcode(unsigned StoreOpcode) {
  switch (StoreOpcode) {
  case X86::VMOVUPSYmr:
  case X86::VMOVAPSYmr:
    return X86::VMOVUPSmr;
  case X86::VMOVUPDYmr:
  case X86::VMOVAPDYmr:
    return X86::VMOVUPDmr;
  case X86::VMOVDQUYmr:
  case X86::VMOVDQAYmr:
    return X86::VMOVDQUmr;
  case X86::VMOVUPSZ256mr:
  case X86::VMOVAPSZ256mr:
    return X86::VMOVUPSZ128mr;
  case X86::VMOVUPDZ256mr:
  case X86::VMOVAPDZ256mr:
    return X86::VMOVUPDZ128mr;
  case X86::VMOVDQU64Z256mr:
  case X86::VMOVDQA64Z256mr:
    return X86::VMOVDQU64Z128mr;
  case X86::VMOVDQU32Z256mr:
  case X86::VMOVDQA32Z256mr:
    return X86::VMOVDQU32Z128mr;
  default:
    llvm_unreachable("Unexpected Store Opcode");
  }
  return 0;
}
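
// buildCopies uses the two helpers above to peel 16-byte XMM-sized chunks
// off a blocked YMM copy before falling back to GPR-sized moves.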

static int getAddrOffset(const MachineInstr *MI) {
  const MCInstrDesc &Descl = MI->getDesc();
  int AddrOffset = X86II::getMemoryOperandNo(Descl.TSFlags);
  assert(AddrOffset != -1 && "Expected Memory Operand");
  AddrOffset += X86II::getOperandBias(Descl);
  return AddrOffset;
}

static MachineOperand &getBaseOperand(MachineInstr *MI) {
  int AddrOffset = getAddrOffset(MI);
  return MI->getOperand(AddrOffset + X86::AddrBaseReg);
}

static MachineOperand &getDispOperand(MachineInstr *MI) {
  int AddrOffset = getAddrOffset(MI);
  return MI->getOperand(AddrOffset + X86::AddrDisp);
}

// Relevant addressing modes contain only base register and immediate
// displacement or frameindex and immediate displacement.
// TODO: Consider expanding to other addressing modes in the future
static bool isRelevantAddressingMode(MachineInstr *MI) {
  int AddrOffset = getAddrOffset(MI);
  const MachineOperand &Base = getBaseOperand(MI);
  const MachineOperand &Disp = getDispOperand(MI);
  const MachineOperand &Scale = MI->getOperand(AddrOffset + X86::AddrScaleAmt);
  const MachineOperand &Index = MI->getOperand(AddrOffset + X86::AddrIndexReg);
  const MachineOperand &Segment =
      MI->getOperand(AddrOffset + X86::AddrSegmentReg);

  if (!((Base.isReg() && Base.getReg() != X86::NoRegister) || Base.isFI()))
    return false;
  if (!Disp.isImm())
    return false;
  if (Scale.getImm() != 1)
    return false;
  if (!(Index.isReg() && Index.getReg() == X86::NoRegister))
    return false;
  if (!(Segment.isReg() && Segment.getReg() == X86::NoRegister))
    return false;
  return true;
}
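
// E.g. `movups (%rdi), %xmm0` or a frame-index access with an immediate
// offset is accepted, while `movups (%rdi,%rcx,4), %xmm0` is rejected for
// its scaled index register.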

// Collect potentially blocking stores.
// Limit the number of instructions backwards we want to inspect
// since the effect of store block won't be visible if the store
// and load instructions have enough instructions in between to
// keep the core busy.
static SmallVector<MachineInstr *, 2>
findPotentialBlockers(MachineInstr *LoadInst) {
  SmallVector<MachineInstr *, 2> PotentialBlockers;
  unsigned BlockCount = 0;
  const unsigned InspectionLimit = X86AvoidSFBInspectionLimit;
  for (auto PBInst = std::next(MachineBasicBlock::reverse_iterator(LoadInst)),
            E = LoadInst->getParent()->rend();
       PBInst != E; ++PBInst) {
    if (PBInst->isMetaInstruction())
      continue;
    BlockCount++;
    if (BlockCount >= InspectionLimit)
      break;
    MachineInstr &MI = *PBInst;
    if (MI.getDesc().isCall())
      return PotentialBlockers;
    PotentialBlockers.push_back(&MI);
  }
  // If we didn't get to the instructions limit try predecessing blocks.
  // Ideally we should traverse the predecessor blocks in depth with some
  // coloring algorithm, but for now let's just look at the first order
  // predecessors.
  if (BlockCount < InspectionLimit) {
    MachineBasicBlock *MBB = LoadInst->getParent();
    int LimitLeft = InspectionLimit - BlockCount;
    for (MachineBasicBlock *PMBB : MBB->predecessors()) {
      int PredCount = 0;
      for (MachineInstr &PBInst : llvm::reverse(*PMBB)) {
        if (PBInst.isMetaInstruction())
          continue;
        PredCount++;
        if (PredCount >= LimitLeft)
          break;
        if (PBInst.getDesc().isCall())
          break;
        PotentialBlockers.push_back(&PBInst);
      }
    }
  }
  return PotentialBlockers;
}
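
// buildCopy emits one replacement load/store pair of the requested size at
// the given displacements, cloning the original MachineMemOperands at the
// matching offsets so alias info stays valid for later passes.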

void X86AvoidSFBPass::buildCopy(MachineInstr *LoadInst, unsigned NLoadOpcode,
                                int64_t LoadDisp, MachineInstr *StoreInst,
                                unsigned NStoreOpcode, int64_t StoreDisp,
                                unsigned Size, int64_t LMMOffset,
                                int64_t SMMOffset) {
  MachineOperand &LoadBase = getBaseOperand(LoadInst);
  MachineOperand &StoreBase = getBaseOperand(StoreInst);
  MachineBasicBlock *MBB = LoadInst->getParent();
  MachineMemOperand *LMMO = *LoadInst->memoperands_begin();
  MachineMemOperand *SMMO = *StoreInst->memoperands_begin();

  Register Reg1 = MRI->createVirtualRegister(
      TII->getRegClass(TII->get(NLoadOpcode), 0));
  MachineInstr *NewLoad =
      BuildMI(*MBB, LoadInst, LoadInst->getDebugLoc(), TII->get(NLoadOpcode),
              Reg1)
          .add(LoadBase)
          .addImm(1)
          .addReg(X86::NoRegister)
          .addImm(LoadDisp)
          .addReg(X86::NoRegister)
          .addMemOperand(
              MBB->getParent()->getMachineMemOperand(LMMO, LMMOffset, Size));
  if (LoadBase.isReg())
    getBaseOperand(NewLoad).setIsKill(false);
  LLVM_DEBUG(NewLoad->dump());
  // If the load and store are consecutive, use the loadInst location to
  // reduce register pressure.
  MachineInstr *StInst = StoreInst;
  auto PrevInstrIt = prev_nodbg(MachineBasicBlock::instr_iterator(StoreInst),
                                MBB->instr_begin());
  if (PrevInstrIt.getNodePtr() == LoadInst)
    StInst = LoadInst;
  MachineInstr *NewStore =
      BuildMI(*MBB, StInst, StInst->getDebugLoc(), TII->get(NStoreOpcode))
          .add(StoreBase)
          .addImm(1)
          .addReg(X86::NoRegister)
          .addImm(StoreDisp)
          .addReg(X86::NoRegister)
          .addReg(Reg1)
          .addMemOperand(
              MBB->getParent()->getMachineMemOperand(SMMO, SMMOffset, Size));
  if (StoreBase.isReg())
    getBaseOperand(NewStore).setIsKill(false);
  MachineOperand &StoreSrcVReg = StoreInst->getOperand(X86::AddrNumOperands);
  assert(StoreSrcVReg.isReg() && "Expected virtual register");
  NewStore->getOperand(X86::AddrNumOperands).setIsKill(StoreSrcVReg.isKill());
  LLVM_DEBUG(NewStore->dump());
}
void X86AvoidSFBPass::buildCopies(int Size, MachineInstr *LoadInst,
                                  int64_t LdDispImm, MachineInstr *StoreInst,
                                  int64_t StDispImm, int64_t LMMOffset,
                                  int64_t SMMOffset) {
  int LdDisp = LdDispImm;
  int StDisp = StDispImm;
  while (Size > 0) {
    if ((Size - MOV128SZ >= 0) && isYMMLoadOpcode(LoadInst->getOpcode())) {
      Size = Size - MOV128SZ;
      buildCopy(LoadInst, getYMMtoXMMLoadOpcode(LoadInst->getOpcode()), LdDisp,
                StoreInst, getYMMtoXMMStoreOpcode(StoreInst->getOpcode()),
                StDisp, MOV128SZ, LMMOffset, SMMOffset);
      LdDisp += MOV128SZ;
      StDisp += MOV128SZ;
      LMMOffset += MOV128SZ;
      SMMOffset += MOV128SZ;
      continue;
    }
    if (Size - MOV64SZ >= 0) {
      Size = Size - MOV64SZ;
      buildCopy(LoadInst, X86::MOV64rm, LdDisp, StoreInst, X86::MOV64mr, StDisp,
                MOV64SZ, LMMOffset, SMMOffset);
      LdDisp += MOV64SZ;
      StDisp += MOV64SZ;
      LMMOffset += MOV64SZ;
      SMMOffset += MOV64SZ;
      continue;
    }
    if (Size - MOV32SZ >= 0) {
      Size = Size - MOV32SZ;
      buildCopy(LoadInst, X86::MOV32rm, LdDisp, StoreInst, X86::MOV32mr, StDisp,
                MOV32SZ, LMMOffset, SMMOffset);
      LdDisp += MOV32SZ;
      StDisp += MOV32SZ;
      LMMOffset += MOV32SZ;
      SMMOffset += MOV32SZ;
      continue;
    }
    if (Size - MOV16SZ >= 0) {
      Size = Size - MOV16SZ;
      buildCopy(LoadInst, X86::MOV16rm, LdDisp, StoreInst, X86::MOV16mr, StDisp,
                MOV16SZ, LMMOffset, SMMOffset);
      LdDisp += MOV16SZ;
      StDisp += MOV16SZ;
      LMMOffset += MOV16SZ;
      SMMOffset += MOV16SZ;
      continue;
    }
    if (Size - MOV8SZ >= 0) {
      Size = Size - MOV8SZ;
      buildCopy(LoadInst, X86::MOV8rm, LdDisp, StoreInst, X86::MOV8mr, StDisp,
                MOV8SZ, LMMOffset, SMMOffset);
      LdDisp += MOV8SZ;
      StDisp += MOV8SZ;
      LMMOffset += MOV8SZ;
      SMMOffset += MOV8SZ;
      continue;
    }
  }
  assert(Size == 0 && "Wrong size division");
}
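
// The decomposition is greedy, largest chunk first: a 7-byte remainder, for
// example, becomes MOV32 + MOV16 + MOV8 (4 + 2 + 1 bytes), and a 16-byte
// chunk of a YMM copy becomes one XMM move, so the loop always reaches
// Size == 0.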

static void updateKillStatus(MachineInstr *LoadInst, MachineInstr *StoreInst) {
  MachineOperand &LoadBase = getBaseOperand(LoadInst);
  MachineOperand &StoreBase = getBaseOperand(StoreInst);
  auto *StorePrevNonDbgInstr =
      prev_nodbg(MachineBasicBlock::instr_iterator(StoreInst),
                 LoadInst->getParent()->instr_begin())
          .getNodePtr();
  if (LoadBase.isReg()) {
    MachineInstr *LastLoad = LoadInst->getPrevNode();
    // If the original load and store to xmm/ymm were consecutive
    // then the partial copies were also created in
    // a consecutive order to reduce register pressure,
    // and the location of the last load is before the last store.
    if (StorePrevNonDbgInstr == LoadInst)
      LastLoad = LoadInst->getPrevNode()->getPrevNode();
    getBaseOperand(LastLoad).setIsKill(LoadBase.isKill());
  }
  if (StoreBase.isReg()) {
    MachineInstr *StInst = StoreInst;
    if (StorePrevNonDbgInstr == LoadInst)
      StInst = LoadInst;
    getBaseOperand(StInst->getPrevNode()).setIsKill(StoreBase.isKill());
  }
}

bool X86AvoidSFBPass::alias(const MachineMemOperand &Op1,
                            const MachineMemOperand &Op2) const {
  if (!Op1.getValue() || !Op2.getValue())
    return true;

  int64_t MinOffset = std::min(Op1.getOffset(), Op2.getOffset());
  int64_t Overlapa = Op1.getSize().getValue() + Op1.getOffset() - MinOffset;
  int64_t Overlapb = Op2.getSize().getValue() + Op2.getOffset() - MinOffset;

  return !AA->isNoAlias(
      MemoryLocation(Op1.getValue(), Overlapa, Op1.getAAInfo()),
      MemoryLocation(Op2.getValue(), Overlapb, Op2.getAAInfo()));
}
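
// Memory operands without an IR value are conservatively treated as
// aliasing; otherwise AA is queried over the overlapping extents of the two
// accesses.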

void X86AvoidSFBPass::findPotentiallylBlockedCopies(MachineFunction &MF) {
  for (auto &MBB : MF)
    for (auto &MI : MBB) {
      if (!isPotentialBlockedMemCpyLd(MI.getOpcode()))
        continue;
      Register DefVR = MI.getOperand(0).getReg();
      if (!MRI->hasOneNonDBGUse(DefVR))
        continue;
      for (MachineOperand &StoreMO :
           llvm::make_early_inc_range(MRI->use_nodbg_operands(DefVR))) {
        MachineInstr &StoreMI = *StoreMO.getParent();
        // Skip cases where the memcpy may overlap.
        if (StoreMI.getParent() == MI.getParent() &&
            isPotentialBlockedMemCpyPair(MI.getOpcode(), StoreMI.getOpcode()) &&
            isRelevantAddressingMode(&MI) &&
            isRelevantAddressingMode(&StoreMI) &&
            MI.hasOneMemOperand() && StoreMI.hasOneMemOperand() &&
            !alias(**MI.memoperands_begin(), **StoreMI.memoperands_begin()))
          BlockedLoadsStoresPairs.push_back(std::make_pair(&MI, &StoreMI));
      }
    }
}

unsigned X86AvoidSFBPass::getRegSizeInBytes(MachineInstr *LoadInst) {
  const auto *TRC = TII->getRegClass(TII->get(LoadInst->getOpcode()), 0);
  return TRI->getRegSizeInBits(*TRC) / 8;
}

void X86AvoidSFBPass::breakBlockedCopies(
    MachineInstr *LoadInst, MachineInstr *StoreInst,
    const DisplacementSizeMap &BlockingStoresDispSizeMap) {
  int64_t LdDispImm = getDispOperand(LoadInst).getImm();
  int64_t StDispImm = getDispOperand(StoreInst).getImm();
  int64_t LMMOffset = 0;
  int64_t SMMOffset = 0;

  int64_t LdDisp1 = LdDispImm;
  int64_t LdDisp2 = 0;
  int64_t StDisp1 = StDispImm;
  int64_t StDisp2 = 0;
  unsigned Size1 = 0;
  unsigned Size2 = 0;
  int64_t LdStDelta = StDispImm - LdDispImm;

  for (auto DispSizePair : BlockingStoresDispSizeMap) {
    LdDisp2 = DispSizePair.first;
    StDisp2 = DispSizePair.first + LdStDelta;
    Size2 = DispSizePair.second;
    // Avoid copying overlapping areas.
    if (LdDisp2 < LdDisp1) {
      int OverlapDelta = LdDisp1 - LdDisp2;
      LdDisp2 += OverlapDelta;
      StDisp2 += OverlapDelta;
      Size2 -= OverlapDelta;
    }
    Size1 = LdDisp2 - LdDisp1;

    // Build a copy for the point until the current blocking store's
    // displacement.
    buildCopies(Size1, LoadInst, LdDisp1, StoreInst, StDisp1, LMMOffset,
                SMMOffset);
    // Build a copy for the current blocking store.
    buildCopies(Size2, LoadInst, LdDisp2, StoreInst, StDisp2, LMMOffset + Size1,
                SMMOffset + Size1);
    LdDisp1 = LdDisp2 + Size2;
    StDisp1 = StDisp2 + Size2;
    LMMOffset += Size1 + Size2;
    SMMOffset += Size1 + Size2;
  }
  unsigned Size3 = (LdDispImm + getRegSizeInBytes(LoadInst)) - LdDisp1;
  buildCopies(Size3, LoadInst, LdDisp1, StoreInst, StDisp1, LMMOffset,
              LMMOffset);
}
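
// For example, a 32-byte YMM copy from displacement 0 blocked by a 4-byte
// store at displacement 16 is emitted as copies of [0, 16) and [16, 20),
// with the final Size3 copy covering the remaining [20, 32).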

static bool hasSameBaseOpValue(MachineInstr *LoadInst,
                               MachineInstr *StoreInst) {
  const MachineOperand &LoadBase = getBaseOperand(LoadInst);
  const MachineOperand &StoreBase = getBaseOperand(StoreInst);
  if (LoadBase.isReg() != StoreBase.isReg())
    return false;
  if (LoadBase.isReg())
    return LoadBase.getReg() == StoreBase.getReg();
  return LoadBase.getIndex() == StoreBase.getIndex();
}

static bool isBlockingStore(int64_t LoadDispImm, unsigned LoadSize,
                            int64_t StoreDispImm, unsigned StoreSize) {
  return ((StoreDispImm >= LoadDispImm) &&
          (StoreDispImm <= LoadDispImm + (LoadSize - StoreSize)));
}
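
// E.g. for a 16-byte load at displacement 32, a 4-byte store is blocking at
// displacements 32 through 44; at 45 it would cross the end of the loaded
// range and no longer be contained in it.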

// Keep the smallest blocking store size for one displacement.
static void
updateBlockingStoresDispSizeMap(DisplacementSizeMap &BlockingStoresDispSizeMap,
                                int64_t DispImm, unsigned Size) {
  auto [It, Inserted] = BlockingStoresDispSizeMap.try_emplace(DispImm, Size);

  if (!Inserted && It->second > Size)
    It->second = Size;
}

// Remove blocking stores contained in each other.
static void
removeRedundantBlockingStores(DisplacementSizeMap &BlockingStoresDispSizeMap) {
  if (BlockingStoresDispSizeMap.size() <= 1)
    return;

  SmallVector<std::pair<int64_t, unsigned>, 0> DispSizeStack;
  for (auto DispSizePair : BlockingStoresDispSizeMap) {
    int64_t CurrDisp = DispSizePair.first;
    unsigned CurrSize = DispSizePair.second;
    while (DispSizeStack.size()) {
      int64_t PrevDisp = DispSizeStack.back().first;
      unsigned PrevSize = DispSizeStack.back().second;
      if (CurrDisp + CurrSize > PrevDisp + PrevSize)
        break;
      DispSizeStack.pop_back();
    }
    DispSizeStack.push_back(DispSizePair);
  }
  BlockingStoresDispSizeMap.clear();
  for (auto Disp : DispSizeStack)
    BlockingStoresDispSizeMap.insert(Disp);
}
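
// E.g. given blocking stores {16: 8} and {18: 2}, the interval [18, 20)
// ends inside [16, 24), so the containing 8-byte entry is popped and only
// the inner 2-byte store remains.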

bool X86AvoidSFBPass::runOnMachineFunction(MachineFunction &MF) {
  bool Changed = false;

  if (DisableX86AvoidStoreForwardBlocks || skipFunction(MF.getFunction()) ||
      !MF.getSubtarget<X86Subtarget>().is64Bit())
    return false;

  MRI = &MF.getRegInfo();
  assert(MRI->isSSA() && "Expected MIR to be in SSA form");
  TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
  TRI = MF.getSubtarget<X86Subtarget>().getRegisterInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  LLVM_DEBUG(dbgs() << "Start X86AvoidStoreForwardBlocks\n";);
  // Look for a load then a store to XMM/YMM which look like a memcpy.
  findPotentiallylBlockedCopies(MF);

  for (auto LoadStoreInstPair : BlockedLoadsStoresPairs) {
    MachineInstr *LoadInst = LoadStoreInstPair.first;
    int64_t LdDispImm = getDispOperand(LoadInst).getImm();
    DisplacementSizeMap BlockingStoresDispSizeMap;

    SmallVector<MachineInstr *, 2> PotentialBlockers =
        findPotentialBlockers(LoadInst);
    for (auto *PBInst : PotentialBlockers) {
      if (!isPotentialBlockingStoreInst(PBInst->getOpcode(),
                                        LoadInst->getOpcode()) ||
          !isRelevantAddressingMode(PBInst) || !PBInst->hasOneMemOperand())
        continue;
      int64_t PBstDispImm = getDispOperand(PBInst).getImm();
      unsigned PBstSize = (*PBInst->memoperands_begin())->getSize().getValue();
      // This check doesn't cover all cases, but it will suffice for now.
      // TODO: take branch probability into consideration, if the blocking
      // store is in an unreached block, breaking the memcopy could lose
      // performance.
      if (hasSameBaseOpValue(LoadInst, PBInst) &&
          isBlockingStore(LdDispImm, getRegSizeInBytes(LoadInst), PBstDispImm,
                          PBstSize))
        updateBlockingStoresDispSizeMap(BlockingStoresDispSizeMap, PBstDispImm,
                                        PBstSize);
    }

    if (BlockingStoresDispSizeMap.empty())
      continue;

    // We found a store forward block, break the memcpy's load and store
    // into smaller copies such that each copy isn't blocked by another copy.
    removeRedundantBlockingStores(BlockingStoresDispSizeMap);
    MachineInstr *StoreInst = LoadStoreInstPair.second;
    LLVM_DEBUG(dbgs() << "Blocked load and store instructions: \n");
    LLVM_DEBUG(LoadInst->dump());
    LLVM_DEBUG(StoreInst->dump());
    LLVM_DEBUG(dbgs() << "Replaced with:\n");
    breakBlockedCopies(LoadInst, StoreInst, BlockingStoresDispSizeMap);
    updateKillStatus(LoadInst, StoreInst);
    ForRemoval.push_back(LoadInst);
    ForRemoval.push_back(StoreInst);
    Changed = true;
  }
  for (auto *RemovedInst : ForRemoval) {
    RemovedInst->eraseFromParent();
  }
  ForRemoval.clear();
  BlockedLoadsStoresPairs.clear();
  LLVM_DEBUG(dbgs() << "End X86AvoidStoreForwardBlocks\n";);

  return Changed;
}
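
// Usage note: the pass can be disabled with -x86-disable-avoid-SFB, and the
// backward inspection window tuned with -x86-sfb-inspection-limit=<n>
// (default 20); both flags are defined at the top of this file.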