LLVM: lib/Target/Mips/MipsInstructionSelector.cpp Source File
1//===- MipsInstructionSelector.cpp ------------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file implements the targeting of the InstructionSelector class for
11/// Mips.
12///
13//===----------------------------------------------------------------------===//
21#include "llvm/IR/IntrinsicsMips.h"
22
23#define DEBUG_TYPE "mips-isel"
24
25using namespace llvm;
26
27namespace {
28
29#define GET_GLOBALISEL_PREDICATE_BITSET
30#include "MipsGenGlobalISel.inc"
31#undef GET_GLOBALISEL_PREDICATE_BITSET
32
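// Instruction selector for the MIPS target. Most patterns are imported from
// TableGen (MipsGenGlobalISel.inc); the select() switch below handles the
// cases that still need manual lowering.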
33class MipsInstructionSelector : public InstructionSelector {
34public:
37
40
41private:
 45 bool materialize32BitImm(Register DestReg, APInt Imm,
 46 MachineIRBuilder &B) const;
58
64
65#define GET_GLOBALISEL_PREDICATES_DECL
66#include "MipsGenGlobalISel.inc"
67#undef GET_GLOBALISEL_PREDICATES_DECL
68
69#define GET_GLOBALISEL_TEMPORARIES_DECL
70#include "MipsGenGlobalISel.inc"
71#undef GET_GLOBALISEL_TEMPORARIES_DECL
72};
73
74}
75
76#define GET_GLOBALISEL_IMPL
77#include "MipsGenGlobalISel.inc"
78#undef GET_GLOBALISEL_IMPL
79
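// The constructor captures the target machine, subtarget, instruction,
// register and register-bank info, and runs the TableGen-erated predicate
// and temporaries initializers from MipsGenGlobalISel.inc.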
80MipsInstructionSelector::MipsInstructionSelector(
81 const MipsTargetMachine &TM, const MipsSubtarget &STI,
82 const MipsRegisterBankInfo &RBI)
83 : TM(TM), STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()),
84 RBI(RBI),
85
86#define GET_GLOBALISEL_PREDICATES_INIT
87#include "MipsGenGlobalISel.inc"
88#undef GET_GLOBALISEL_PREDICATES_INIT
89#define GET_GLOBALISEL_TEMPORARIES_INIT
90#include "MipsGenGlobalISel.inc"
91#undef GET_GLOBALISEL_TEMPORARIES_INIT
92{
93}
94
95bool MipsInstructionSelector::isRegInGprb(Register Reg,
96 MachineRegisterInfo &MRI) const {
97 return RBI.getRegBank(Reg, MRI, TRI)->getID() == Mips::GPRBRegBankID;
98}
99
100bool MipsInstructionSelector::isRegInFprb(Register Reg,
101 MachineRegisterInfo &MRI) const {
102 return RBI.getRegBank(Reg, MRI, TRI)->getID() == Mips::FPRBRegBankID;
103}
104
105bool MipsInstructionSelector::selectCopy(MachineInstr &I,
106 MachineRegisterInfo &MRI) const {
 107 Register DstReg = I.getOperand(0).getReg();
 108 if (DstReg.isPhysical())
 109 return true;
 110
 111 const TargetRegisterClass *RC = getRegClassForTypeOnBank(DstReg, MRI);
 112 if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
113 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
114 << " operand\n");
115 return false;
116 }
117 return true;
118}
119
120const TargetRegisterClass *MipsInstructionSelector::getRegClassForTypeOnBank(
121 Register Reg, MachineRegisterInfo &MRI) const {
122 const LLT Ty = MRI.getType(Reg);
123 const unsigned TySize = Ty.getSizeInBits();
124
125 if (isRegInGprb(Reg, MRI)) {
127 "Register class not available for LLT, register bank combination");
128 return &Mips::GPR32RegClass;
129 }
130
 131 if (isRegInFprb(Reg, MRI)) {
 132 if (Ty.isScalar()) {
133 assert((TySize == 32 || TySize == 64) &&
134 "Register class not available for LLT, register bank combination");
135 if (TySize == 32)
136 return &Mips::FGR32RegClass;
137 return STI.isFP64bit() ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
138 }
139 }
140
142}
143
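// Materialize a 32-bit immediate into DestReg with the cheapest sequence:
// ORi from $zero when the upper 16 bits are zero, a bare LUi when the lower
// 16 bits are zero, ADDiu from $zero for signed 16-bit values, and a
// LUi + ORi pair otherwise.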
144bool MipsInstructionSelector::materialize32BitImm(Register DestReg, APInt Imm,
145 MachineIRBuilder &B) const {
146 assert(Imm.getBitWidth() == 32 && "Unsupported immediate size.");
147
148 if (Imm.getHiBits(16).isZero()) {
149 MachineInstr *Inst =
150 B.buildInstr(Mips::ORi, {DestReg}, {Register(Mips::ZERO)})
 151 .addImm(Imm.getLoBits(16).getLimitedValue());
 152 return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
 153 }
154
155 if (Imm.getLoBits(16).isZero()) {
156 MachineInstr *Inst = B.buildInstr(Mips::LUi, {DestReg}, {})
 157 .addImm(Imm.getHiBits(16).getLimitedValue());
 158 return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
 159 }
160
161 if (Imm.isSignedIntN(16)) {
162 MachineInstr *Inst =
163 B.buildInstr(Mips::ADDiu, {DestReg}, {Register(Mips::ZERO)})
 164 .addImm(Imm.getLoBits(16).getLimitedValue());
 165 return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
 166 }
167
168 Register LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass);
169 MachineInstr *LUi = B.buildInstr(Mips::LUi, {LUiReg}, {})
170 .addImm(Imm.getHiBits(16).getLimitedValue());
171 MachineInstr *ORi = B.buildInstr(Mips::ORi, {DestReg}, {LUiReg})
 172 .addImm(Imm.getLoBits(16).getLimitedValue());
 173 if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
 174 return false;
 175 if (!constrainSelectedInstRegOperands(*ORi, TII, TRI, RBI))
 176 return false;
 177 return true;
178}
179
180
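// Pick the MIPS opcode for a G_LOAD/G_STORE/G_ZEXTLOAD/G_SEXTLOAD based on
// the value's register bank (GPR, FPR or MSA vector), its LLT size and the
// memory access size. Returning the original generic opcode signals to the
// caller that no suitable instruction exists.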
181unsigned
182MipsInstructionSelector::selectLoadStoreOpCode(MachineInstr &I,
183 MachineRegisterInfo &MRI) const {
184 const Register ValueReg = I.getOperand(0).getReg();
 185 const LLT Ty = MRI.getType(ValueReg);
 186 const unsigned TySize = Ty.getSizeInBits();
187 const unsigned MemSizeInBytes =
188 (*I.memoperands_begin())->getSize().getValue();
189 unsigned Opc = I.getOpcode();
190 const bool isStore = Opc == TargetOpcode::G_STORE;
191
 192 if (isRegInGprb(ValueReg, MRI)) {
 193 assert(((Ty.isScalar() && TySize == 32) ||
 194 (Ty.isPointer() && TySize == 32 && MemSizeInBytes == 4)) &&
 195 "Unsupported register bank, LLT, MemSizeInBytes combination");
 196 (void)TySize;
 197 if (isStore)
 198 switch (MemSizeInBytes) {
199 case 4:
200 return Mips::SW;
201 case 2:
202 return Mips::SH;
203 case 1:
204 return Mips::SB;
205 default:
206 return Opc;
207 }
208 else
209
210 switch (MemSizeInBytes) {
211 case 4:
212 return Mips::LW;
213 case 2:
214 return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LH : Mips::LHu;
215 case 1:
216 return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LB : Mips::LBu;
217 default:
218 return Opc;
219 }
220 }
221
 222 if (isRegInFprb(ValueReg, MRI)) {
 223 if (Ty.isScalar()) {
 224 assert(((TySize == 32 && MemSizeInBytes == 4) ||
225 (TySize == 64 && MemSizeInBytes == 8)) &&
226 "Unsupported register bank, LLT, MemSizeInBytes combination");
227
228 if (MemSizeInBytes == 4)
229 return isStore ? Mips::SWC1 : Mips::LWC1;
230
 231 if (STI.isFP64bit())
 232 return isStore ? Mips::SDC164 : Mips::LDC164;
233 return isStore ? Mips::SDC1 : Mips::LDC1;
234 }
235
 236 if (Ty.isVector()) {
 237 assert(STI.hasMSA() && "Vector instructions require target with MSA.");
238 assert((TySize == 128 && MemSizeInBytes == 16) &&
239 "Unsupported register bank, LLT, MemSizeInBytes combination");
241 case 8:
242 return isStore ? Mips::ST_B : Mips::LD_B;
243 case 16:
244 return isStore ? Mips::ST_H : Mips::LD_H;
245 case 32:
246 return isStore ? Mips::ST_W : Mips::LD_W;
247 case 64:
248 return isStore ? Mips::ST_D : Mips::LD_D;
249 default:
250 return Opc;
251 }
252 }
253 }
254
255 return Opc;
256}
257
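// Emit one half of an unaligned word store (Mips::SWL or Mips::SWR) at
// BaseAddr + Offset, using the original store's memory operand.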
258bool MipsInstructionSelector::buildUnalignedStore(
259 MachineInstr &I, unsigned Opc, MachineOperand &BaseAddr, unsigned Offset,
260 MachineMemOperand *MMO) const {
261 MachineInstr *NewInst =
264 .add(BaseAddr)
268 return false;
269 return true;
270}
271
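// Emit one half of an unaligned word load (Mips::LWL or Mips::LWR) into Dest
// at BaseAddr + Offset; TiedDest carries the previously loaded half (or an
// IMPLICIT_DEF for the first half of the pair).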
272bool MipsInstructionSelector::buildUnalignedLoad(
273 MachineInstr &I, unsigned Opc, Register Dest, MachineOperand &BaseAddr,
274 unsigned Offset, Register TiedDest, MachineMemOperand *MMO) const {
275 MachineInstr *NewInst =
278 .add(BaseAddr)
283 return false;
284 return true;
285}
286
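// Main entry point: turn one generic MachineInstr into MIPS machine code.
// Copies and GPR multiplies are handled first, then the TableGen-erated
// selectImpl() is tried, and whatever remains is lowered manually in the
// switch below.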
287bool MipsInstructionSelector::select(MachineInstr &I) {
 288
 289 MachineBasicBlock &MBB = *I.getParent();
 290 MachineFunction &MF = *MBB.getParent();
 291 MachineRegisterInfo &MRI = MF.getRegInfo();
 292
 293 if (!isPreISelGenericOpcode(I.getOpcode())) {
 294 if (I.isCopy())
 295 return selectCopy(I, MRI);
 296
 297 return true;
 298 }
299
300 if (I.getOpcode() == Mips::G_MUL &&
301 isRegInGprb(I.getOperand(0).getReg(), MRI)) {
 302 MachineInstr *Mul = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MUL))
 303 .add(I.getOperand(0))
 304 .add(I.getOperand(1))
 305 .add(I.getOperand(2));
 306 if (!constrainSelectedInstRegOperands(*Mul, TII, TRI, RBI))
 307 return false;
310
311 I.eraseFromParent();
312 return true;
313 }
314
315 if (selectImpl(I, *CoverageInfo))
316 return true;
317
318 MachineInstr *MI = nullptr;
319 using namespace TargetOpcode;
320
321 switch (I.getOpcode()) {
322 case G_UMULH: {
323 Register PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
324 MachineInstr *PseudoMULTu, *PseudoMove;
325
326 PseudoMULTu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMULTu))
327 .addDef(PseudoMULTuReg)
331 return false;
332
333 PseudoMove = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMFHI))
334 .addDef(I.getOperand(0).getReg())
335 .addUse(PseudoMULTuReg);
337 return false;
338
339 I.eraseFromParent();
340 return true;
341 }
342 case G_PTR_ADD: {
347 break;
348 }
349 case G_INTTOPTR:
350 case G_PTRTOINT: {
353 }
354 case G_FRAME_INDEX: {
359 break;
360 }
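 // Jump-table branch: scale the index by the entry size, add it to the
 // jump-table address, load the target (adding the global base register for
 // position-independent code), and branch indirectly.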
361 case G_BRJT: {
362 unsigned EntrySize =
365 "Non-power-of-two jump-table entry size not supported.");
366
367 Register JTIndex = MRI.createVirtualRegister(&Mips::GPR32RegClass);
368 MachineInstr *SLL = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SLL))
370 .addUse(I.getOperand(2).getReg())
373 return false;
374
375 Register DestAddress = MRI.createVirtualRegister(&Mips::GPR32RegClass);
376 MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
378 .addUse(I.getOperand(0).getReg())
381 return false;
382
383 Register Dest = MRI.createVirtualRegister(&Mips::GPR32RegClass);
384 MachineInstr *LW =
392 return false;
393
395 Register DestTmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
397 MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
401 ->getGlobalBaseRegForGlobalISel(MF));
403 return false;
404 }
405
406 MachineInstr *Branch =
407 BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
410 return false;
411
412 I.eraseFromParent();
413 return true;
414 }
415 case G_BRINDIRECT: {
416 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
418 break;
419 }
420 case G_PHI: {
421 const Register DestReg = I.getOperand(0).getReg();
422
423 const TargetRegisterClass *DefRC = nullptr;
425 DefRC = TRI.getRegClass(DestReg);
426 else
427 DefRC = getRegClassForTypeOnBank(DestReg, MRI);
428
429 I.setDesc(TII.get(TargetOpcode::PHI));
431 }
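 // Loads and stores: fold a constant G_PTR_ADD offset into the 16-bit
 // immediate field when possible; word accesses that the target cannot do
 // unaligned are split into SWL/SWR or LWL/LWR pairs.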
432 case G_STORE:
433 case G_LOAD:
434 case G_ZEXTLOAD:
435 case G_SEXTLOAD: {
436 auto MMO = *I.memoperands_begin();
437 MachineOperand BaseAddr = I.getOperand(1);
438 int64_t SignedOffset = 0;
439
440
441
442
443
444
445
446 MachineInstr *Addr = MRI.getVRegDef(I.getOperand(1).getReg());
447 if (Addr->getOpcode() == G_PTR_ADD) {
449 if (Offset->getOpcode() == G_CONSTANT) {
450 APInt OffsetValue = Offset->getOperand(1).getCImm()->getValue();
454 }
455 }
456 }
457
458
462 if (MMO->getSize() != 4 || !isRegInGprb(I.getOperand(0).getReg(), MRI))
463 return false;
464
465 if (I.getOpcode() == G_STORE) {
466 if (!buildUnalignedStore(I, Mips::SWL, BaseAddr, SignedOffset + 3, MMO))
467 return false;
468 if (!buildUnalignedStore(I, Mips::SWR, BaseAddr, SignedOffset, MMO))
469 return false;
470 I.eraseFromParent();
471 return true;
472 }
473
474 if (I.getOpcode() == G_LOAD) {
475 Register ImplDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
476 BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
478 Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
479 if (!buildUnalignedLoad(I, Mips::LWL, Tmp, BaseAddr, SignedOffset + 3,
480 ImplDef, MMO))
481 return false;
482 if (!buildUnalignedLoad(I, Mips::LWR, I.getOperand(0).getReg(),
483 BaseAddr, SignedOffset, Tmp, MMO))
484 return false;
485 I.eraseFromParent();
486 return true;
487 }
488
489 return false;
490 }
491
492 const unsigned NewOpc = selectLoadStoreOpCode(I, MRI);
493 if (NewOpc == I.getOpcode())
494 return false;
495
498 .add(BaseAddr)
499 .addImm(SignedOffset)
501 break;
502 }
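 // Integer division and remainder go through the HI/LO accumulator:
 // PseudoSDIV or PseudoUDIV followed by PseudoMFLO (quotient) or
 // PseudoMFHI (remainder).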
503 case G_UDIV:
504 case G_UREM:
505 case G_SDIV:
506 case G_SREM: {
507 Register HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
508 bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV;
509 bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV;
510
511 MachineInstr *PseudoDIV, *PseudoMove;
513 TII.get(IsSigned ? Mips::PseudoSDIV : Mips::PseudoUDIV))
518 return false;
519
520 PseudoMove = BuildMI(MBB, I, I.getDebugLoc(),
521 TII.get(IsDiv ? Mips::PseudoMFLO : Mips::PseudoMFHI))
522 .addDef(I.getOperand(0).getReg())
525 return false;
526
527 I.eraseFromParent();
528 return true;
529 }
530 case G_SELECT: {
531
537 break;
538 }
539 case G_UNMERGE_VALUES: {
540 if (I.getNumOperands() != 3)
541 return false;
542 Register Src = I.getOperand(2).getReg();
545 if (!isRegInFprb(Src, MRI) ||
546 !(isRegInGprb(Lo, MRI) && isRegInGprb(Hi, MRI)))
547 return false;
548
549 unsigned Opcode =
550 STI.isFP64bit() ? Mips::ExtractElementF64_64 : Mips::ExtractElementF64;
551
552 MachineInstr *ExtractLo = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
557 return false;
558
559 MachineInstr *ExtractHi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
564 return false;
565
566 I.eraseFromParent();
567 return true;
568 }
569 case G_IMPLICIT_DEF: {
570 Register Dst = I.getOperand(0).getReg();
571 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
573
574
575 MRI.setRegClass(Dst, getRegClassForTypeOnBank(Dst, MRI));
576 break;
577 }
578 case G_CONSTANT: {
580 if (!materialize32BitImm(I.getOperand(0).getReg(),
581 I.getOperand(1).getCImm()->getValue(), B))
582 return false;
583
584 I.eraseFromParent();
585 return true;
586 }
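 // Floating-point constants are first materialized in GPRs and then moved
 // into the FPU: MTC1 for 32-bit values, BuildPairF64(_64) for 64-bit ones.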
587 case G_FCONSTANT: {
588 const APFloat &FPimm = I.getOperand(1).getFPImm()->getValueAPF();
590 unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
591
592 if (Size == 32) {
593 Register GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
595 if (!materialize32BitImm(GPRReg, APImm, B))
596 return false;
597
598 MachineInstrBuilder MTC1 =
599 B.buildInstr(Mips::MTC1, {I.getOperand(0).getReg()}, {GPRReg});
601 return false;
602 }
603 if (Size == 64) {
604 Register GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
605 Register GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass);
607 if (!materialize32BitImm(GPRRegHigh, APImm.getHiBits(32).trunc(32), B))
608 return false;
609 if (!materialize32BitImm(GPRRegLow, APImm.getLoBits(32).trunc(32), B))
610 return false;
611
612 MachineInstrBuilder PairF64 = B.buildInstr(
613 STI.isFP64bit() ? Mips::BuildPairF64_64 : Mips::BuildPairF64,
614 {I.getOperand(0).getReg()}, {GPRRegLow, GPRRegHigh});
616 return false;
617 }
618
619 I.eraseFromParent();
620 return true;
621 }
622 case G_FABS: {
623 unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
624 unsigned FABSOpcode =
625 Size == 32 ? Mips::FABS_S
626 : STI.isFP64bit() ? Mips::FABS_D64 : Mips::FABS_D32;
630 break;
631 }
632 case G_FPTOSI: {
633 unsigned FromSize = MRI.getType(I.getOperand(1).getReg()).getSizeInBits();
634 unsigned ToSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
635 (void)ToSize;
636 assert((ToSize == 32) && "Unsupported integer size for G_FPTOSI");
637 assert((FromSize == 32 || FromSize == 64) &&
638 "Unsupported floating point size for G_FPTOSI");
639
640 unsigned Opcode;
641 if (FromSize == 32)
642 Opcode = Mips::TRUNC_W_S;
643 else
644 Opcode = STI.isFP64bit() ? Mips::TRUNC_W_D64 : Mips::TRUNC_W_D32;
645 Register ResultInFPR = MRI.createVirtualRegister(&Mips::FGR32RegClass);
646 MachineInstr *Trunc = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
648 .addUse(I.getOperand(1).getReg());
650 return false;
651
652 MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MFC1))
653 .addDef(I.getOperand(0).getReg())
654 .addUse(ResultInFPR);
656 return false;
657
658 I.eraseFromParent();
659 return true;
660 }
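 // Global addresses: in position-independent code the address is loaded from
 // the GOT through the global base register (with an extra ADDiu for
 // local-linkage globals); otherwise it is built with a LUi/ADDiu pair.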
661 case G_GLOBAL_VALUE: {
662 const llvm::GlobalValue *GVal = I.getOperand(1).getGlobal();
664 MachineInstr *LWGOT = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
665 .addDef(I.getOperand(0).getReg())
667 ->getGlobalBaseRegForGlobalISel(MF))
669
670
671
672
675 else
681 return false;
682
684 Register LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
686
687 MachineInstr *ADDiu =
689 .addDef(I.getOperand(0).getReg())
694 return false;
695 }
696 } else {
697 Register LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
698
699 MachineInstr *LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
704 return false;
705
706 MachineInstr *ADDiu =
708 .addDef(I.getOperand(0).getReg())
713 return false;
714 }
715 I.eraseFromParent();
716 return true;
717 }
718 case G_JUMP_TABLE: {
721 .addDef(I.getOperand(0).getReg())
723 ->getGlobalBaseRegForGlobalISel(MF))
728 } else {
731 .addDef(I.getOperand(0).getReg())
733 }
734 break;
735 }
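 // Integer compares produce 0 or 1 in a GPR by combining set-on-less-than
 // and XOR instructions; each step of the sequence is described by the small
 // Instr helper struct below.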
 736 case G_ICMP: {
 737 struct Instr {
 738 unsigned Opcode;
742
743 bool hasImm() const {
744 if (Opcode == Mips::SLTiu || Opcode == Mips::XORi)
745 return true;
746 return false;
747 }
748 };
749
 750 SmallVector<struct Instr, 2> Instructions;
 751 Register ICMPReg = I.getOperand(0).getReg();
752 Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
757
758 switch (Cond) {
761 Instructions.emplace_back(Mips::SLTiu, ICMPReg, Temp, 1);
762 break;
765 Instructions.emplace_back(Mips::SLTu, ICMPReg, Mips::ZERO, Temp);
766 break;
769 break;
772 Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
773 break;
776 break;
779 Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
780 break;
783 break;
786 Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
787 break;
790 break;
793 Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
794 break;
795 default:
796 return false;
797 }
798
800 for (const struct Instr &Instruction : Instructions) {
801 MachineInstrBuilder MIB = B.buildInstr(
802 Instruction.Opcode, {Instruction.Def}, {Instruction.LHS});
803
806 else
808
810 return false;
811 }
812
813 I.eraseFromParent();
814 return true;
815 }
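 // Floating-point compares emit FCMP_S32/FCMP_D32/FCMP_D64 with a MIPS
 // condition code and then turn the FP condition flag into 0 or 1 with a
 // conditional move (MOVF_I, or MOVT_I when the predicate is handled as its
 // logical negation).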
816 case G_FCMP: {
817 unsigned MipsFCMPCondCode;
818 bool isLogicallyNegated;
 819 switch (CmpInst::Predicate(
 820 I.getOperand(1).getPredicate())) {
825 break;
830 break;
832 case CmpInst::FCMP_ONE:
835 break;
837 case CmpInst::FCMP_UGE:
840 break;
842 case CmpInst::FCMP_OGE:
845 break;
850 break;
855 break;
856 default:
857 return false;
858 }
859
860
861
862
863
864 unsigned MoveOpcode = isLogicallyNegated ? Mips::MOVT_I : Mips::MOVF_I;
865
866 Register TrueInReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
871
872 unsigned Size = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
873 unsigned FCMPOpcode =
874 Size == 32 ? Mips::FCMP_S32
875 : STI.isFP64bit() ? Mips::FCMP_D64 : Mips::FCMP_D32;
876 MachineInstr *FCMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FCMPOpcode))
877 .addUse(I.getOperand(2).getReg())
878 .addUse(I.getOperand(3).getReg())
879 .addImm(MipsFCMPCondCode);
881 return false;
882
883 MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(MoveOpcode))
884 .addDef(I.getOperand(0).getReg())
889 return false;
890
891 I.eraseFromParent();
892 return true;
893 }
894 case G_FENCE: {
896 break;
897 }
898 case G_VASTART: {
 899 MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
901
902 Register LeaReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
903 MachineInstr *LEA_ADDiu =
904 BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LEA_ADDiu))
909 return false;
910
913 .addUse(I.getOperand(0).getReg())
916 return false;
917
918 I.eraseFromParent();
919 return true;
920 }
921 default:
922 return false;
923 }
924
 925 I.eraseFromParent();
 926 return constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
927}
928
929namespace llvm {
930InstructionSelector *
931createMipsInstructionSelector(const MipsTargetMachine &TM,
932 const MipsSubtarget &Subtarget,
933 const MipsRegisterBankInfo &RBI) {
934 return new MipsInstructionSelector(TM, Subtarget, RBI);
935}
936}