LLVM: lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp Source File
//===-- X86MCTargetDesc.cpp - X86 Target Descriptions --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides X86 specific target descriptions.
//
//===----------------------------------------------------------------------===//

#include "X86MCTargetDesc.h"
#include "TargetInfo/X86TargetInfo.h"
#include "X86ATTInstPrinter.h"
#include "X86BaseInfo.h"
#include "X86IntelInstPrinter.h"
#include "X86MCAsmInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Twine.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/MC/MCDisassembler/MCRelocationInfo.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstPrinter.h"
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/Endian.h"
#include "llvm/TargetParser/Triple.h"

using namespace llvm;

#define GET_REGINFO_MC_DESC
#include "X86GenRegisterInfo.inc"

#define GET_INSTRINFO_MC_DESC
#define GET_INSTRINFO_MC_HELPERS
#define ENABLE_INSTR_PREDICATE_VERIFIER
#include "X86GenInstrInfo.inc"

#define GET_SUBTARGETINFO_MC_DESC
#include "X86GenSubtargetInfo.inc"

std::string X86_MC::ParseX86Triple(const Triple &TT) {
  std::string FS;

  // SSE2 should default to enabled in 64-bit mode, but can be turned off
  // explicitly.
  if (TT.isArch64Bit())
    FS = "+64bit-mode,-32bit-mode,-16bit-mode,+sse2";
  else if (TT.getEnvironment() != Triple::CODE16)
    FS = "-64bit-mode,+32bit-mode,-16bit-mode";
  else
    FS = "-64bit-mode,-32bit-mode,+16bit-mode";

  return FS;
}
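
// Illustrative note (not part of the original file): for a 64-bit triple such
// as x86_64-unknown-linux-gnu this returns
// "+64bit-mode,-32bit-mode,-16bit-mode,+sse2"; createX86MCSubtargetInfo below
// prepends that string to any caller-supplied feature list.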

unsigned X86_MC::getDwarfRegFlavour(const Triple &TT, bool isEH) {
  if (TT.getArch() == Triple::x86_64)
    return DWARFFlavour::X86_64;

  if (TT.isOSDarwin())
    return isEH ? DWARFFlavour::X86_32_DarwinEH : DWARFFlavour::X86_32_Generic;
  if (TT.isOSCygMing())
    // Unsupported by now, just quick fallback
    return DWARFFlavour::X86_32_Generic;
  return DWARFFlavour::X86_32_Generic;
}

bool X86_MC::hasLockPrefix(const MCInst &MI) {
  return MI.getFlags() & X86::IP_HAS_LOCK;
}

static bool isMemOperand(const MCInst &MI, unsigned Op, unsigned RegClassID) {
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);
  const MCRegisterClass &RC = X86MCRegisterClasses[RegClassID];

  return (Base.isReg() && Base.getReg() && RC.contains(Base.getReg())) ||
         (Index.isReg() && Index.getReg() && RC.contains(Index.getReg()));
}

bool X86_MC::is16BitMemOperand(const MCInst &MI, unsigned Op,
                               const MCSubtargetInfo &STI) {
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);

  if (STI.hasFeature(X86::Is16Bit) && Base.isReg() && !Base.getReg() &&
      Index.isReg() && !Index.getReg())
    return true;
  return isMemOperand(MI, Op, X86::GR16RegClassID);
}

bool X86_MC::is32BitMemOperand(const MCInst &MI, unsigned Op) {
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);
  if (Base.isReg() && Base.getReg() == X86::EIP) {
    assert(Index.isReg() && !Index.getReg() && "Invalid eip-based address");
    return true;
  }
  if (Index.isReg() && Index.getReg() == X86::EIZ)
    return true;
  return isMemOperand(MI, Op, X86::GR32RegClassID);
}

#ifndef NDEBUG
bool X86_MC::is64BitMemOperand(const MCInst &MI, unsigned Op) {
  return isMemOperand(MI, Op, X86::GR64RegClassID);
}
#endif

bool X86_MC::needsAddressSizeOverride(const MCInst &MI,
                                      const MCSubtargetInfo &STI,
                                      int MemoryOperand, uint64_t TSFlags) {
  uint64_t AdSize = TSFlags & X86II::AdSizeMask;
  bool Is16BitMode = STI.hasFeature(X86::Is16Bit);
  bool Is32BitMode = STI.hasFeature(X86::Is32Bit);
  bool Is64BitMode = STI.hasFeature(X86::Is64Bit);
  if ((Is16BitMode && AdSize == X86II::AdSize32) ||
      (Is32BitMode && AdSize == X86II::AdSize16) ||
      (Is64BitMode && AdSize == X86II::AdSize32))
    return true;
  uint64_t Form = TSFlags & X86II::FormMask;
  switch (Form) {
  default:
    break;
  case X86II::RawFrmDstSrc: {
    MCRegister siReg = MI.getOperand(1).getReg();
    assert(((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) ||
            (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) ||
            (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) &&
           "SI and DI register sizes do not match");
    return (!Is32BitMode && siReg == X86::ESI) ||
           (Is32BitMode && siReg == X86::SI);
  }
  case X86II::RawFrmSrc: {
    MCRegister siReg = MI.getOperand(0).getReg();
    return (!Is32BitMode && siReg == X86::ESI) ||
           (Is32BitMode && siReg == X86::SI);
  }
  case X86II::RawFrmDst: {
    MCRegister siReg = MI.getOperand(0).getReg();
    return (!Is32BitMode && siReg == X86::EDI) ||
           (Is32BitMode && siReg == X86::DI);
  }
  }

  // Determine where the memory operand starts, if present.
  if (MemoryOperand < 0)
    return false;

  if (Is64BitMode) {
    assert(!is16BitMemOperand(MI, MemoryOperand, STI));
    return is32BitMemOperand(MI, MemoryOperand);
  }
  if (Is32BitMode) {
    assert(!is64BitMemOperand(MI, MemoryOperand));
    return is16BitMemOperand(MI, MemoryOperand, STI);
  }
  assert(Is16BitMode);
  assert(!is64BitMemOperand(MI, MemoryOperand));
  return !is16BitMemOperand(MI, MemoryOperand, STI);
}
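
// Illustrative note (not part of the original file): the MC code emitter uses
// this hook to decide whether to emit the 0x67 address-size override prefix.
// For example, "movl (%eax), %ecx" encoded in 64-bit mode uses a 32-bit
// addressing form and therefore needs the prefix, while the same memory
// operand written with %rax does not.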

void X86_MC::initLLVMToSEHAndCVRegMapping(MCRegisterInfo *MRI) {
  // FIXME: TableGen these.
  for (unsigned Reg = X86::NoRegister + 1; Reg < X86::NUM_TARGET_REGS; ++Reg) {
    unsigned SEH = MRI->getEncodingValue(Reg);
    MRI->mapLLVMRegToSEHReg(Reg, SEH);
  }

  // Mapping from CodeView register id to MC register.
  static const struct {
    codeview::RegisterId CVReg;
    MCPhysReg Reg;
  } RegMap[] = {
      {codeview::RegisterId::AL, X86::AL},
      {codeview::RegisterId::CL, X86::CL},
      {codeview::RegisterId::DL, X86::DL},
      {codeview::RegisterId::BL, X86::BL},
      {codeview::RegisterId::AH, X86::AH},
      {codeview::RegisterId::CH, X86::CH},
      {codeview::RegisterId::DH, X86::DH},
      {codeview::RegisterId::BH, X86::BH},
      {codeview::RegisterId::AX, X86::AX},
      {codeview::RegisterId::CX, X86::CX},
      {codeview::RegisterId::DX, X86::DX},
      {codeview::RegisterId::BX, X86::BX},
      {codeview::RegisterId::SP, X86::SP},
      {codeview::RegisterId::BP, X86::BP},
      {codeview::RegisterId::SI, X86::SI},
      {codeview::RegisterId::DI, X86::DI},
      {codeview::RegisterId::EAX, X86::EAX},
      {codeview::RegisterId::ECX, X86::ECX},
      {codeview::RegisterId::EDX, X86::EDX},
      {codeview::RegisterId::EBX, X86::EBX},
      {codeview::RegisterId::ESP, X86::ESP},
      {codeview::RegisterId::EBP, X86::EBP},
      {codeview::RegisterId::ESI, X86::ESI},
      {codeview::RegisterId::EDI, X86::EDI},

      {codeview::RegisterId::EFLAGS, X86::EFLAGS},

      {codeview::RegisterId::ST0, X86::ST0},
      {codeview::RegisterId::ST1, X86::ST1},
      {codeview::RegisterId::ST2, X86::ST2},
      {codeview::RegisterId::ST3, X86::ST3},
      {codeview::RegisterId::ST4, X86::ST4},
      {codeview::RegisterId::ST5, X86::ST5},
      {codeview::RegisterId::ST6, X86::ST6},
      {codeview::RegisterId::ST7, X86::ST7},

      {codeview::RegisterId::ST0, X86::FP0},
      {codeview::RegisterId::ST1, X86::FP1},
      {codeview::RegisterId::ST2, X86::FP2},
      {codeview::RegisterId::ST3, X86::FP3},
      {codeview::RegisterId::ST4, X86::FP4},
      {codeview::RegisterId::ST5, X86::FP5},
      {codeview::RegisterId::ST6, X86::FP6},
      {codeview::RegisterId::ST7, X86::FP7},

      {codeview::RegisterId::MM0, X86::MM0},
      {codeview::RegisterId::MM1, X86::MM1},
      {codeview::RegisterId::MM2, X86::MM2},
      {codeview::RegisterId::MM3, X86::MM3},
      {codeview::RegisterId::MM4, X86::MM4},
      {codeview::RegisterId::MM5, X86::MM5},
      {codeview::RegisterId::MM6, X86::MM6},
      {codeview::RegisterId::MM7, X86::MM7},

      {codeview::RegisterId::XMM0, X86::XMM0},
      {codeview::RegisterId::XMM1, X86::XMM1},
      {codeview::RegisterId::XMM2, X86::XMM2},
      {codeview::RegisterId::XMM3, X86::XMM3},
      {codeview::RegisterId::XMM4, X86::XMM4},
      {codeview::RegisterId::XMM5, X86::XMM5},
      {codeview::RegisterId::XMM6, X86::XMM6},
      {codeview::RegisterId::XMM7, X86::XMM7},

      {codeview::RegisterId::XMM8, X86::XMM8},
      {codeview::RegisterId::XMM9, X86::XMM9},
      {codeview::RegisterId::XMM10, X86::XMM10},
      {codeview::RegisterId::XMM11, X86::XMM11},
      {codeview::RegisterId::XMM12, X86::XMM12},
      {codeview::RegisterId::XMM13, X86::XMM13},
      {codeview::RegisterId::XMM14, X86::XMM14},
      {codeview::RegisterId::XMM15, X86::XMM15},

      {codeview::RegisterId::SIL, X86::SIL},
      {codeview::RegisterId::DIL, X86::DIL},
      {codeview::RegisterId::BPL, X86::BPL},
      {codeview::RegisterId::SPL, X86::SPL},
      {codeview::RegisterId::RAX, X86::RAX},
      {codeview::RegisterId::RBX, X86::RBX},
      {codeview::RegisterId::RCX, X86::RCX},
      {codeview::RegisterId::RDX, X86::RDX},
      {codeview::RegisterId::RSI, X86::RSI},
      {codeview::RegisterId::RDI, X86::RDI},
      {codeview::RegisterId::RBP, X86::RBP},
      {codeview::RegisterId::RSP, X86::RSP},
      {codeview::RegisterId::R8, X86::R8},
      {codeview::RegisterId::R9, X86::R9},
      {codeview::RegisterId::R10, X86::R10},
      {codeview::RegisterId::R11, X86::R11},
      {codeview::RegisterId::R12, X86::R12},
      {codeview::RegisterId::R13, X86::R13},
      {codeview::RegisterId::R14, X86::R14},
      {codeview::RegisterId::R15, X86::R15},
      {codeview::RegisterId::R8B, X86::R8B},
      {codeview::RegisterId::R9B, X86::R9B},
      {codeview::RegisterId::R10B, X86::R10B},
      {codeview::RegisterId::R11B, X86::R11B},
      {codeview::RegisterId::R12B, X86::R12B},
      {codeview::RegisterId::R13B, X86::R13B},
      {codeview::RegisterId::R14B, X86::R14B},
      {codeview::RegisterId::R15B, X86::R15B},
      {codeview::RegisterId::R8W, X86::R8W},
      {codeview::RegisterId::R9W, X86::R9W},
      {codeview::RegisterId::R10W, X86::R10W},
      {codeview::RegisterId::R11W, X86::R11W},
      {codeview::RegisterId::R12W, X86::R12W},
      {codeview::RegisterId::R13W, X86::R13W},
      {codeview::RegisterId::R14W, X86::R14W},
      {codeview::RegisterId::R15W, X86::R15W},
      {codeview::RegisterId::R8D, X86::R8D},
      {codeview::RegisterId::R9D, X86::R9D},
      {codeview::RegisterId::R10D, X86::R10D},
      {codeview::RegisterId::R11D, X86::R11D},
      {codeview::RegisterId::R12D, X86::R12D},
      {codeview::RegisterId::R13D, X86::R13D},
      {codeview::RegisterId::R14D, X86::R14D},
      {codeview::RegisterId::R15D, X86::R15D},
      {codeview::RegisterId::AMD64_YMM0, X86::YMM0},
      {codeview::RegisterId::AMD64_YMM1, X86::YMM1},
      {codeview::RegisterId::AMD64_YMM2, X86::YMM2},
      {codeview::RegisterId::AMD64_YMM3, X86::YMM3},
      {codeview::RegisterId::AMD64_YMM4, X86::YMM4},
      {codeview::RegisterId::AMD64_YMM5, X86::YMM5},
      {codeview::RegisterId::AMD64_YMM6, X86::YMM6},
      {codeview::RegisterId::AMD64_YMM7, X86::YMM7},
      {codeview::RegisterId::AMD64_YMM8, X86::YMM8},
      {codeview::RegisterId::AMD64_YMM9, X86::YMM9},
      {codeview::RegisterId::AMD64_YMM10, X86::YMM10},
      {codeview::RegisterId::AMD64_YMM11, X86::YMM11},
      {codeview::RegisterId::AMD64_YMM12, X86::YMM12},
      {codeview::RegisterId::AMD64_YMM13, X86::YMM13},
      {codeview::RegisterId::AMD64_YMM14, X86::YMM14},
      {codeview::RegisterId::AMD64_YMM15, X86::YMM15},
      {codeview::RegisterId::AMD64_YMM16, X86::YMM16},
      {codeview::RegisterId::AMD64_YMM17, X86::YMM17},
      {codeview::RegisterId::AMD64_YMM18, X86::YMM18},
      {codeview::RegisterId::AMD64_YMM19, X86::YMM19},
      {codeview::RegisterId::AMD64_YMM20, X86::YMM20},
      {codeview::RegisterId::AMD64_YMM21, X86::YMM21},
      {codeview::RegisterId::AMD64_YMM22, X86::YMM22},
      {codeview::RegisterId::AMD64_YMM23, X86::YMM23},
      {codeview::RegisterId::AMD64_YMM24, X86::YMM24},
      {codeview::RegisterId::AMD64_YMM25, X86::YMM25},
      {codeview::RegisterId::AMD64_YMM26, X86::YMM26},
      {codeview::RegisterId::AMD64_YMM27, X86::YMM27},
      {codeview::RegisterId::AMD64_YMM28, X86::YMM28},
      {codeview::RegisterId::AMD64_YMM29, X86::YMM29},
      {codeview::RegisterId::AMD64_YMM30, X86::YMM30},
      {codeview::RegisterId::AMD64_YMM31, X86::YMM31},
      {codeview::RegisterId::AMD64_ZMM0, X86::ZMM0},
      {codeview::RegisterId::AMD64_ZMM1, X86::ZMM1},
      {codeview::RegisterId::AMD64_ZMM2, X86::ZMM2},
      {codeview::RegisterId::AMD64_ZMM3, X86::ZMM3},
      {codeview::RegisterId::AMD64_ZMM4, X86::ZMM4},
      {codeview::RegisterId::AMD64_ZMM5, X86::ZMM5},
      {codeview::RegisterId::AMD64_ZMM6, X86::ZMM6},
      {codeview::RegisterId::AMD64_ZMM7, X86::ZMM7},
      {codeview::RegisterId::AMD64_ZMM8, X86::ZMM8},
      {codeview::RegisterId::AMD64_ZMM9, X86::ZMM9},
      {codeview::RegisterId::AMD64_ZMM10, X86::ZMM10},
      {codeview::RegisterId::AMD64_ZMM11, X86::ZMM11},
      {codeview::RegisterId::AMD64_ZMM12, X86::ZMM12},
      {codeview::RegisterId::AMD64_ZMM13, X86::ZMM13},
      {codeview::RegisterId::AMD64_ZMM14, X86::ZMM14},
      {codeview::RegisterId::AMD64_ZMM15, X86::ZMM15},
      {codeview::RegisterId::AMD64_ZMM16, X86::ZMM16},
      {codeview::RegisterId::AMD64_ZMM17, X86::ZMM17},
      {codeview::RegisterId::AMD64_ZMM18, X86::ZMM18},
      {codeview::RegisterId::AMD64_ZMM19, X86::ZMM19},
      {codeview::RegisterId::AMD64_ZMM20, X86::ZMM20},
      {codeview::RegisterId::AMD64_ZMM21, X86::ZMM21},
      {codeview::RegisterId::AMD64_ZMM22, X86::ZMM22},
      {codeview::RegisterId::AMD64_ZMM23, X86::ZMM23},
      {codeview::RegisterId::AMD64_ZMM24, X86::ZMM24},
      {codeview::RegisterId::AMD64_ZMM25, X86::ZMM25},
      {codeview::RegisterId::AMD64_ZMM26, X86::ZMM26},
      {codeview::RegisterId::AMD64_ZMM27, X86::ZMM27},
      {codeview::RegisterId::AMD64_ZMM28, X86::ZMM28},
      {codeview::RegisterId::AMD64_ZMM29, X86::ZMM29},
      {codeview::RegisterId::AMD64_ZMM30, X86::ZMM30},
      {codeview::RegisterId::AMD64_ZMM31, X86::ZMM31},
      {codeview::RegisterId::AMD64_K0, X86::K0},
      {codeview::RegisterId::AMD64_K1, X86::K1},
      {codeview::RegisterId::AMD64_K2, X86::K2},
      {codeview::RegisterId::AMD64_K3, X86::K3},
      {codeview::RegisterId::AMD64_K4, X86::K4},
      {codeview::RegisterId::AMD64_K5, X86::K5},
      {codeview::RegisterId::AMD64_K6, X86::K6},
      {codeview::RegisterId::AMD64_K7, X86::K7},
      {codeview::RegisterId::AMD64_XMM16, X86::XMM16},
      {codeview::RegisterId::AMD64_XMM17, X86::XMM17},
      {codeview::RegisterId::AMD64_XMM18, X86::XMM18},
      {codeview::RegisterId::AMD64_XMM19, X86::XMM19},
      {codeview::RegisterId::AMD64_XMM20, X86::XMM20},
      {codeview::RegisterId::AMD64_XMM21, X86::XMM21},
      {codeview::RegisterId::AMD64_XMM22, X86::XMM22},
      {codeview::RegisterId::AMD64_XMM23, X86::XMM23},
      {codeview::RegisterId::AMD64_XMM24, X86::XMM24},
      {codeview::RegisterId::AMD64_XMM25, X86::XMM25},
      {codeview::RegisterId::AMD64_XMM26, X86::XMM26},
      {codeview::RegisterId::AMD64_XMM27, X86::XMM27},
      {codeview::RegisterId::AMD64_XMM28, X86::XMM28},
      {codeview::RegisterId::AMD64_XMM29, X86::XMM29},
      {codeview::RegisterId::AMD64_XMM30, X86::XMM30},
      {codeview::RegisterId::AMD64_XMM31, X86::XMM31},

  };
  for (const auto &I : RegMap)
    MRI->mapLLVMRegToCVReg(I.Reg, static_cast<int>(I.CVReg));
}

MCSubtargetInfo *X86_MC::createX86MCSubtargetInfo(const Triple &TT,
                                                  StringRef CPU, StringRef FS) {
  std::string ArchFS = X86_MC::ParseX86Triple(TT);
  assert(!ArchFS.empty() && "Failed to parse X86 triple");
  if (!FS.empty())
    ArchFS = (Twine(ArchFS) + "," + FS).str();

  if (CPU.empty())
    CPU = "generic";

  size_t posNoEVEX512 = FS.rfind("-evex512");
  // Make sure we won't be confused by features such as "-avx512fp16".
  size_t posNoAVX512F =
      FS.ends_with("-avx512f") ? FS.size() - 8 : FS.rfind("-avx512f,");
  size_t posEVEX512 = FS.rfind("+evex512");
  size_t posAVX512F = FS.rfind("+avx512"); // Any AVX512 feature implies AVX512F.

  if (posAVX512F != StringRef::npos &&
      (posNoAVX512F == StringRef::npos || posNoAVX512F < posAVX512F))
    if (posEVEX512 == StringRef::npos && posNoEVEX512 == StringRef::npos)
      ArchFS += ",+evex512";

  return createX86MCSubtargetInfoImpl(TT, CPU, CPU, ArchFS);
}
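
// Usage sketch (not part of the original file); clients normally reach this
// factory through the target registry rather than calling it directly:
//
//   std::string Err;
//   const Target *T = TargetRegistry::lookupTarget("x86_64-pc-linux-gnu", Err);
//   MCSubtargetInfo *STI =
//       T->createMCSubtargetInfo("x86_64-pc-linux-gnu", "skylake", "+avx512f");
//
// With "+avx512f" present and no explicit evex512 setting, the code above
// appends ",+evex512" to the feature string passed to the generated
// createX86MCSubtargetInfoImpl.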

static MCInstrInfo *createX86MCInstrInfo() {
  MCInstrInfo *X = new MCInstrInfo();
  InitX86MCInstrInfo(X);
  return X;
}

static MCRegisterInfo *createX86MCRegisterInfo(const Triple &TT) {
  unsigned RA = (TT.getArch() == Triple::x86_64)
                    ? X86::RIP  // Should have dwarf #16.
                    : X86::EIP; // Should have dwarf #8.

  MCRegisterInfo *X = new MCRegisterInfo();
  InitX86MCRegisterInfo(X, RA, X86_MC::getDwarfRegFlavour(TT, false),
                        X86_MC::getDwarfRegFlavour(TT, true), RA);
  X86_MC::initLLVMToSEHAndCVRegMapping(X);
  return X;
}

static MCAsmInfo *createX86MCAsmInfo(const MCRegisterInfo &MRI,
                                     const Triple &TheTriple,
                                     const MCTargetOptions &Options) {
  bool is64Bit = TheTriple.getArch() == Triple::x86_64;

  MCAsmInfo *MAI;
  if (TheTriple.isOSBinFormatMachO()) {
    if (is64Bit)
      MAI = new X86_64MCAsmInfoDarwin(TheTriple);
    else
      MAI = new X86MCAsmInfoDarwin(TheTriple);
  } else if (TheTriple.isOSBinFormatELF()) {
    // Force the use of an ELF container.
    MAI = new X86ELFMCAsmInfo(TheTriple);
  } else if (TheTriple.isWindowsMSVCEnvironment() ||
             TheTriple.isWindowsCoreCLREnvironment()) {
    if (Options.getAssemblyLanguage().equals_insensitive("masm"))
      MAI = new X86MCAsmInfoMicrosoftMASM(TheTriple);
    else
      MAI = new X86MCAsmInfoMicrosoft(TheTriple);
  } else if (TheTriple.isOSCygMing() ||
             TheTriple.isWindowsItaniumEnvironment()) {
    MAI = new X86MCAsmInfoGNUCOFF(TheTriple);
  } else if (TheTriple.isUEFI()) {
    MAI = new X86MCAsmInfoGNUCOFF(TheTriple);
  } else {
    // The default is ELF.
    MAI = new X86ELFMCAsmInfo(TheTriple);
  }

  // Initialize initial frame state.
  // Calculate amount of bytes used for return address storing.
  int stackGrowth = is64Bit ? -8 : -4;

  // Initial state of the frame pointer is esp+stackGrowth.
  unsigned StackPtr = is64Bit ? X86::RSP : X86::ESP;
  MCCFIInstruction Inst = MCCFIInstruction::cfiDefCfa(
      nullptr, MRI.getDwarfRegNum(StackPtr, true), -stackGrowth);
  MAI->addInitialFrameState(Inst);

  // Add return address to move list.
  unsigned InstPtr = is64Bit ? X86::RIP : X86::EIP;
  MCCFIInstruction Inst2 = MCCFIInstruction::createOffset(
      nullptr, MRI.getDwarfRegNum(InstPtr, true), stackGrowth);
  MAI->addInitialFrameState(Inst2);

  return MAI;
}

static MCInstPrinter *createX86MCInstPrinter(const Triple &T,
                                             unsigned SyntaxVariant,
                                             const MCAsmInfo &MAI,
                                             const MCInstrInfo &MII,
                                             const MCRegisterInfo &MRI) {
  if (SyntaxVariant == 0)
    return new X86ATTInstPrinter(MAI, MII, MRI);
  if (SyntaxVariant == 1)
    return new X86IntelInstPrinter(MAI, MII, MRI);
  return nullptr;
}

static MCRelocationInfo *createX86MCRelocationInfo(const Triple &TheTriple,
                                                   MCContext &Ctx) {
  // Default to the stock relocation info.
  return llvm::createMCRelocationInfo(TheTriple, Ctx);
}

namespace llvm {
namespace X86_MC {

class X86MCInstrAnalysis : public MCInstrAnalysis {
  X86MCInstrAnalysis(const X86MCInstrAnalysis &) = delete;
  X86MCInstrAnalysis &operator=(const X86MCInstrAnalysis &) = delete;
  virtual ~X86MCInstrAnalysis() = default;

public:
  X86MCInstrAnalysis(const MCInstrInfo *MCII) : MCInstrAnalysis(MCII) {}

#define GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS
#include "X86GenSubtargetInfo.inc"

  bool clearsSuperRegisters(const MCRegisterInfo &MRI, const MCInst &Inst,
                            APInt &Mask) const override;
  std::vector<std::pair<uint64_t, uint64_t>>
  findPltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
                 const Triple &TargetTriple) const override;

  bool evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size,
                      uint64_t &Target) const override;
  std::optional<uint64_t>
  evaluateMemoryOperandAddress(const MCInst &Inst, const MCSubtargetInfo *STI,
                               uint64_t Addr, uint64_t Size) const override;
  std::optional<uint64_t>
  getMemoryOperandRelocationOffset(const MCInst &Inst,
                                   uint64_t Size) const override;
};

#define GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS
#include "X86GenSubtargetInfo.inc"

bool X86MCInstrAnalysis::clearsSuperRegisters(const MCRegisterInfo &MRI,
                                              const MCInst &Inst,
                                              APInt &Mask) const {
  const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
  unsigned NumDefs = Desc.getNumDefs();
  unsigned NumImplicitDefs = Desc.implicit_defs().size();
  assert(Mask.getBitWidth() == NumDefs + NumImplicitDefs &&
         "Unexpected number of bits in the mask!");

  bool HasVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::VEX;
  bool HasEVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX;
  bool HasXOP = (Desc.TSFlags & X86II::EncodingMask) == X86II::XOP;

  const MCRegisterClass &GR32RC = MRI.getRegClass(X86::GR32RegClassID);
  const MCRegisterClass &VR128XRC = MRI.getRegClass(X86::VR128XRegClassID);
  const MCRegisterClass &VR256XRC = MRI.getRegClass(X86::VR256XRegClassID);

  auto ClearsSuperReg = [=](unsigned RegID) {
    // On X86-64, a general purpose integer register is viewed as a 64-bit
    // register internal to the processor.
    // An update to the lower 32 bits of a 64 bit integer register is
    // architecturally defined to zero extend the upper 32 bits.
    if (GR32RC.contains(RegID))
      return true;

    // Early exit if this instruction has no vex/evex/xop prefix.
    if (!HasEVEX && !HasVEX && !HasXOP)
      return false;

    // All VEX and EVEX encoded instructions are defined to zero the high bits
    // of the destination register up to VLMAX (i.e. the maximum vector
    // register width pertaining to the instruction).
    // We assume the same behavior for XOP instructions too.
    return VR128XRC.contains(RegID) || VR256XRC.contains(RegID);
  };

  Mask.clearAllBits();
  for (unsigned I = 0, E = NumDefs; I < E; ++I) {
    const MCOperand &Op = Inst.getOperand(I);
    if (ClearsSuperReg(Op.getReg()))
      Mask.setBit(I);
  }

  for (unsigned I = 0, E = NumImplicitDefs; I < E; ++I) {
    const MCPhysReg Reg = Desc.implicit_defs()[I];
    if (ClearsSuperReg(Reg))
      Mask.setBit(NumDefs + I);
  }

  return Mask.getBoolValue();
}

static std::vector<std::pair<uint64_t, uint64_t>>
findX86PltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents) {
  // Do a lightweight parsing of PLT entries.
  std::vector<std::pair<uint64_t, uint64_t>> Result;
  for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End; ) {
    // Recognize a jmp.
    if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0xa3) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // address of the base of the .got.plt section plus the immediate.
      // Set the 1 << 32 bit to let ELFObjectFileBase::getPltEntries convert
      // the offset to an address.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.emplace_back(PltSectionVA + Byte, Imm | (uint64_t(1) << 32));
      Byte += 6;
    } else if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // address stored at the absolute immediate.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(std::make_pair(PltSectionVA + Byte, Imm));
      Byte += 6;
    } else
      Byte++;
  }
  return Result;
}

static std::vector<std::pair<uint64_t, uint64_t>>
findX86_64PltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents) {
  // Do a lightweight parsing of PLT entries.
  std::vector<std::pair<uint64_t, uint64_t>> Result;
  for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End; ) {
    // Recognize a rip-relative jmp.
    if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // address loaded from the GOT slot at rip + Imm.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(
          std::make_pair(PltSectionVA + Byte, PltSectionVA + Byte + 6 + Imm));
      Byte += 6;
    } else
      Byte++;
  }
  return Result;
}

std::vector<std::pair<uint64_t, uint64_t>>
X86MCInstrAnalysis::findPltEntries(uint64_t PltSectionVA,
                                   ArrayRef<uint8_t> PltContents,
                                   const Triple &TargetTriple) const {
  switch (TargetTriple.getArch()) {
  case Triple::x86:
    return findX86PltEntries(PltSectionVA, PltContents);
  case Triple::x86_64:
    return findX86_64PltEntries(PltSectionVA, PltContents);
  default:
    return {};
  }
}

bool X86MCInstrAnalysis::evaluateBranch(const MCInst &Inst, uint64_t Addr,
                                        uint64_t Size, uint64_t &Target) const {
  if (Inst.getNumOperands() == 0 ||
      Info->get(Inst.getOpcode()).operands()[0].OperandType !=
          MCOI::OPERAND_PCREL)
    return false;
  Target = Addr + Size + Inst.getOperand(0).getImm();
  return true;
}

std::optional<uint64_t> X86MCInstrAnalysis::evaluateMemoryOperandAddress(
    const MCInst &Inst, const MCSubtargetInfo *STI, uint64_t Addr,
    uint64_t Size) const {
  const MCInstrDesc &MCID = Info->get(Inst.getOpcode());
  int MemOpStart = X86II::getMemoryOperandNo(MCID.TSFlags);
  if (MemOpStart == -1)
    return std::nullopt;
  MemOpStart += X86II::getOperandBias(MCID);

  const MCOperand &SegReg = Inst.getOperand(MemOpStart + X86::AddrSegmentReg);
  const MCOperand &BaseReg = Inst.getOperand(MemOpStart + X86::AddrBaseReg);
  const MCOperand &IndexReg = Inst.getOperand(MemOpStart + X86::AddrIndexReg);
  const MCOperand &ScaleAmt = Inst.getOperand(MemOpStart + X86::AddrScaleAmt);
  const MCOperand &Disp = Inst.getOperand(MemOpStart + X86::AddrDisp);
  if (SegReg.getReg() || IndexReg.getReg() || ScaleAmt.getImm() != 1 ||
      !Disp.isImm())
    return std::nullopt;

  // RIP-relative addressing.
  if (BaseReg.getReg() == X86::RIP)
    return Addr + Size + Disp.getImm();

  return std::nullopt;
}

std::optional<uint64_t>
X86MCInstrAnalysis::getMemoryOperandRelocationOffset(const MCInst &Inst,
                                                     uint64_t Size) const {
  if (Inst.getOpcode() != X86::LEA64r)
    return std::nullopt;
  const MCInstrDesc &MCID = Info->get(Inst.getOpcode());
  int MemOpStart = X86II::getMemoryOperandNo(MCID.TSFlags);
  if (MemOpStart == -1)
    return std::nullopt;
  MemOpStart += X86II::getOperandBias(MCID);

  const MCOperand &SegReg = Inst.getOperand(MemOpStart + X86::AddrSegmentReg);
  const MCOperand &BaseReg = Inst.getOperand(MemOpStart + X86::AddrBaseReg);
  const MCOperand &IndexReg = Inst.getOperand(MemOpStart + X86::AddrIndexReg);
  const MCOperand &ScaleAmt = Inst.getOperand(MemOpStart + X86::AddrScaleAmt);
  const MCOperand &Disp = Inst.getOperand(MemOpStart + X86::AddrDisp);
  // Must be a simple rip-relative address.
  if (BaseReg.getReg() != X86::RIP || SegReg.getReg() || IndexReg.getReg() ||
      ScaleAmt.getImm() != 1 || !Disp.isImm())
    return std::nullopt;
  // The instruction ends with the 32-bit displacement, so the relocation
  // applies to the last four bytes.
  assert(Size > 4 && "invalid instruction size for rip-relative lea");
  return Size - 4;
}

} // end of namespace X86_MC

} // end of namespace llvm

static MCInstrAnalysis *createX86MCInstrAnalysis(const MCInstrInfo *Info) {
  return new X86_MC::X86MCInstrAnalysis(Info);
}

// Force static initialization.
extern "C" LLVM_C_ABI void LLVMInitializeX86TargetMC() {
  for (Target *T : {&getTheX86_32Target(), &getTheX86_64Target()}) {
    // Register the MC asm info.
    RegisterMCAsmInfoFn X(*T, createX86MCAsmInfo);

    // Register the MC instruction info.
    TargetRegistry::RegisterMCInstrInfo(*T, createX86MCInstrInfo);

    // Register the MC register info.
    TargetRegistry::RegisterMCRegInfo(*T, createX86MCRegisterInfo);

    // Register the MC subtarget info.
    TargetRegistry::RegisterMCSubtargetInfo(*T,
                                            X86_MC::createX86MCSubtargetInfo);

    // Register the MC instruction analyzer.
    TargetRegistry::RegisterMCInstrAnalysis(*T, createX86MCInstrAnalysis);

    // Register the code emitter.
    TargetRegistry::RegisterMCCodeEmitter(*T, createX86MCCodeEmitter);

    // Register the object target streamer.
    TargetRegistry::RegisterObjectTargetStreamer(*T,
                                                 createX86ObjectTargetStreamer);

    // Register the asm target streamer.
    TargetRegistry::RegisterAsmTargetStreamer(*T, createX86AsmTargetStreamer);

    // Register the null streamer.
    TargetRegistry::RegisterNullTargetStreamer(*T, createX86NullTargetStreamer);

    TargetRegistry::RegisterCOFFStreamer(*T, createX86WinCOFFStreamer);
    TargetRegistry::RegisterELFStreamer(*T, createX86ELFStreamer);

    // Register the MCInstPrinter.
    TargetRegistry::RegisterMCInstPrinter(*T, createX86MCInstPrinter);

    // Register the MC relocation info.
    TargetRegistry::RegisterMCRelocationInfo(*T, createX86MCRelocationInfo);
  }

  // Register the asm backend.
  TargetRegistry::RegisterMCAsmBackend(getTheX86_32Target(),
                                       createX86_32AsmBackend);
  TargetRegistry::RegisterMCAsmBackend(getTheX86_64Target(),
                                       createX86_64AsmBackend);
}

MCRegister llvm::getX86SubSuperRegister(MCRegister Reg, unsigned Size,
                                        bool High) {
#define DEFAULT_NOREG \
  default: \
    return X86::NoRegister;
#define SUB_SUPER(R1, R2, R3, R4, R) \
  case X86::R1: \
  case X86::R2: \
  case X86::R3: \
  case X86::R4: \
    return X86::R;
#define A_SUB_SUPER(R) \
  case X86::AH: \
    SUB_SUPER(AL, AX, EAX, RAX, R)
#define D_SUB_SUPER(R) \
  case X86::DH: \
    SUB_SUPER(DL, DX, EDX, RDX, R)
#define C_SUB_SUPER(R) \
  case X86::CH: \
    SUB_SUPER(CL, CX, ECX, RCX, R)
#define B_SUB_SUPER(R) \
  case X86::BH: \
    SUB_SUPER(BL, BX, EBX, RBX, R)
#define SI_SUB_SUPER(R) SUB_SUPER(SIL, SI, ESI, RSI, R)
#define DI_SUB_SUPER(R) SUB_SUPER(DIL, DI, EDI, RDI, R)
#define BP_SUB_SUPER(R) SUB_SUPER(BPL, BP, EBP, RBP, R)
#define SP_SUB_SUPER(R) SUB_SUPER(SPL, SP, ESP, RSP, R)
#define NO_SUB_SUPER(NO, REG) \
  SUB_SUPER(R##NO##B, R##NO##W, R##NO##D, R##NO, REG)
#define NO_SUB_SUPER_B(NO) NO_SUB_SUPER(NO, R##NO##B)
#define NO_SUB_SUPER_W(NO) NO_SUB_SUPER(NO, R##NO##W)
#define NO_SUB_SUPER_D(NO) NO_SUB_SUPER(NO, R##NO##D)
#define NO_SUB_SUPER_Q(NO) NO_SUB_SUPER(NO, R##NO)
  switch (Size) {
  default:
    llvm_unreachable("illegal register size");
  case 8:
    if (High) {
      switch (Reg.id()) {
        DEFAULT_NOREG
        A_SUB_SUPER(AH) D_SUB_SUPER(DH) C_SUB_SUPER(CH) B_SUB_SUPER(BH)
      }
    } else {
      switch (Reg.id()) {
        DEFAULT_NOREG
        A_SUB_SUPER(AL) D_SUB_SUPER(DL) C_SUB_SUPER(CL) B_SUB_SUPER(BL)
        SI_SUB_SUPER(SIL) DI_SUB_SUPER(DIL) BP_SUB_SUPER(BPL) SP_SUB_SUPER(SPL)
        NO_SUB_SUPER_B(8) NO_SUB_SUPER_B(9) NO_SUB_SUPER_B(10)
        NO_SUB_SUPER_B(11) NO_SUB_SUPER_B(12) NO_SUB_SUPER_B(13)
        NO_SUB_SUPER_B(14) NO_SUB_SUPER_B(15) NO_SUB_SUPER_B(16)
        NO_SUB_SUPER_B(17) NO_SUB_SUPER_B(18) NO_SUB_SUPER_B(19)
        NO_SUB_SUPER_B(20) NO_SUB_SUPER_B(21) NO_SUB_SUPER_B(22)
        NO_SUB_SUPER_B(23) NO_SUB_SUPER_B(24) NO_SUB_SUPER_B(25)
        NO_SUB_SUPER_B(26) NO_SUB_SUPER_B(27) NO_SUB_SUPER_B(28)
        NO_SUB_SUPER_B(29) NO_SUB_SUPER_B(30) NO_SUB_SUPER_B(31)
      }
    }
  case 16:
    switch (Reg.id()) {
      DEFAULT_NOREG
      A_SUB_SUPER(AX) D_SUB_SUPER(DX) C_SUB_SUPER(CX) B_SUB_SUPER(BX)
      SI_SUB_SUPER(SI) DI_SUB_SUPER(DI) BP_SUB_SUPER(BP) SP_SUB_SUPER(SP)
      NO_SUB_SUPER_W(8) NO_SUB_SUPER_W(9) NO_SUB_SUPER_W(10)
      NO_SUB_SUPER_W(11) NO_SUB_SUPER_W(12) NO_SUB_SUPER_W(13)
      NO_SUB_SUPER_W(14) NO_SUB_SUPER_W(15) NO_SUB_SUPER_W(16)
      NO_SUB_SUPER_W(17) NO_SUB_SUPER_W(18) NO_SUB_SUPER_W(19)
      NO_SUB_SUPER_W(20) NO_SUB_SUPER_W(21) NO_SUB_SUPER_W(22)
      NO_SUB_SUPER_W(23) NO_SUB_SUPER_W(24) NO_SUB_SUPER_W(25)
      NO_SUB_SUPER_W(26) NO_SUB_SUPER_W(27) NO_SUB_SUPER_W(28)
      NO_SUB_SUPER_W(29) NO_SUB_SUPER_W(30) NO_SUB_SUPER_W(31)
    }
  case 32:
    switch (Reg.id()) {
      DEFAULT_NOREG
      A_SUB_SUPER(EAX) D_SUB_SUPER(EDX) C_SUB_SUPER(ECX) B_SUB_SUPER(EBX)
      SI_SUB_SUPER(ESI) DI_SUB_SUPER(EDI) BP_SUB_SUPER(EBP) SP_SUB_SUPER(ESP)
      NO_SUB_SUPER_D(8) NO_SUB_SUPER_D(9) NO_SUB_SUPER_D(10)
      NO_SUB_SUPER_D(11) NO_SUB_SUPER_D(12) NO_SUB_SUPER_D(13)
      NO_SUB_SUPER_D(14) NO_SUB_SUPER_D(15) NO_SUB_SUPER_D(16)
      NO_SUB_SUPER_D(17) NO_SUB_SUPER_D(18) NO_SUB_SUPER_D(19)
      NO_SUB_SUPER_D(20) NO_SUB_SUPER_D(21) NO_SUB_SUPER_D(22)
      NO_SUB_SUPER_D(23) NO_SUB_SUPER_D(24) NO_SUB_SUPER_D(25)
      NO_SUB_SUPER_D(26) NO_SUB_SUPER_D(27) NO_SUB_SUPER_D(28)
      NO_SUB_SUPER_D(29) NO_SUB_SUPER_D(30) NO_SUB_SUPER_D(31)
    }
  case 64:
    switch (Reg.id()) {
      DEFAULT_NOREG
      A_SUB_SUPER(RAX) D_SUB_SUPER(RDX) C_SUB_SUPER(RCX) B_SUB_SUPER(RBX)
      SI_SUB_SUPER(RSI) DI_SUB_SUPER(RDI) BP_SUB_SUPER(RBP) SP_SUB_SUPER(RSP)
      NO_SUB_SUPER_Q(8) NO_SUB_SUPER_Q(9) NO_SUB_SUPER_Q(10)
      NO_SUB_SUPER_Q(11) NO_SUB_SUPER_Q(12) NO_SUB_SUPER_Q(13)
      NO_SUB_SUPER_Q(14) NO_SUB_SUPER_Q(15) NO_SUB_SUPER_Q(16)
      NO_SUB_SUPER_Q(17) NO_SUB_SUPER_Q(18) NO_SUB_SUPER_Q(19)
      NO_SUB_SUPER_Q(20) NO_SUB_SUPER_Q(21) NO_SUB_SUPER_Q(22)
      NO_SUB_SUPER_Q(23) NO_SUB_SUPER_Q(24) NO_SUB_SUPER_Q(25)
      NO_SUB_SUPER_Q(26) NO_SUB_SUPER_Q(27) NO_SUB_SUPER_Q(28)
      NO_SUB_SUPER_Q(29) NO_SUB_SUPER_Q(30) NO_SUB_SUPER_Q(31)
    }
  }
}
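
// Usage sketch (not part of the original file): getX86SubSuperRegister maps a
// general-purpose register to its alias of the requested bit width, e.g.
//
//   getX86SubSuperRegister(X86::EAX, 16);                // X86::AX
//   getX86SubSuperRegister(X86::RAX, 8);                 // X86::AL
//   getX86SubSuperRegister(X86::RAX, 8, /*High=*/true);  // X86::AH
//
// Registers with no alias at the requested width yield X86::NoRegister.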