LLVM: lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp Source File
//===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

using namespace llvm;

namespace {

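// Shared AArch64 assembler backend. The object-format-specific subclasses
// below (Darwin/Mach-O, ELF, COFF) differ mainly in the object writer they
// create and, for Mach-O, in compact unwind emission.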
class AArch64AsmBackend : public MCAsmBackend {
  static const unsigned PCRelFlagVal =
      MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel;

protected:
  Triple TheTriple;

public:
  AArch64AsmBackend(const Target &T, const Triple &TT, bool IsLittleEndian)
      : MCAsmBackend(IsLittleEndian ? llvm::endianness::little
                                    : llvm::endianness::big),
        TheTriple(TT) {}

  unsigned getNumFixupKinds() const override {
    return AArch64::NumTargetFixupKinds;
  }

  std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
        // This table *must* be in the order that the fixup_* kinds are defined
        // in AArch64FixupKinds.h.
        //
        // Name                           Offset (bits) Size (bits)     Flags
56 {"fixup_aarch64_pcrel_adr_imm21", 0, 32, PCRelFlagVal},
57 {"fixup_aarch64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal},
58 {"fixup_aarch64_add_imm12", 10, 12, 0},
59 {"fixup_aarch64_ldst_imm12_scale1", 10, 12, 0},
60 {"fixup_aarch64_ldst_imm12_scale2", 10, 12, 0},
61 {"fixup_aarch64_ldst_imm12_scale4", 10, 12, 0},
62 {"fixup_aarch64_ldst_imm12_scale8", 10, 12, 0},
63 {"fixup_aarch64_ldst_imm12_scale16", 10, 12, 0},
64 {"fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal},
65 {"fixup_aarch64_movw", 5, 16, 0},
66 {"fixup_aarch64_pcrel_branch9", 5, 9, PCRelFlagVal},
67 {"fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal},
68 {"fixup_aarch64_pcrel_branch16", 5, 16, PCRelFlagVal},
69 {"fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal},
70 {"fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal},
71 {"fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal}};

    // Fixup kinds from .reloc directive are like R_AARCH64_NONE. They do not
    // require any extra processing.
    if (Kind >= FirstLiteralRelocationKind)
      return MCAsmBackend::getFixupKindInfo(FK_NONE);

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;

  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value) const override;

  void relaxInstruction(MCInst &Inst, const MCSubtargetInfo &STI) const override;

  bool writeNopData(raw_ostream &OS, uint64_t Count,
                    const MCSubtargetInfo *STI) const override;

  unsigned getFixupKindContainereSizeInBytes(unsigned Kind) const;

  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
                             const MCValue &Target, const uint64_t Value,
                             const MCSubtargetInfo *STI) override;
};

} // end anonymous namespace

/// The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;

  case FK_Data_2:
  case FK_SecRel_2:
    return 2;

  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch9:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_pcrel_branch16:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    return 3;

  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
  case FK_Data_4:
  case FK_SecRel_4:
    return 4;

  case FK_Data_8:
    return 8;
  }
}

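// Scatter a 21-bit ADR/ADRP immediate into the instruction encoding: the low
// two bits go into immlo (bits 30:29) and the remaining 19 bits into immhi
// (bits 23:5).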
static unsigned AdrImmBits(unsigned Value) {
  unsigned lo2 = Value & 0x3;
  unsigned hi19 = (Value & 0x1ffffc) >> 2;
  return (hi19 << 5) | (lo2 << 29);
}

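// Range-check the resolved fixup value, diagnose misaligned targets, and
// return the value shifted/masked into the bit positions of the instruction
// field that the fixup kind describes.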
static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
                                 uint64_t Value, MCContext &Ctx,
                                 const Triple &TheTriple, bool IsResolved) {
  int64_t SignedValue = static_cast<int64_t>(Value);
  switch (Fixup.getTargetKind()) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
    if (!isInt<21>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return AdrImmBits(Value & 0x1fffffULL);
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
    assert(!IsResolved);
    if (TheTriple.isOSBinFormatCOFF()) {
      if (!isInt<21>(SignedValue))
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
      return AdrImmBits(Value & 0x1fffffULL);
    }
    return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    // Signed 19-bit immediate which gets multiplied by 4
    if (!isInt<21>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    // Low two bits are not encoded.
    return (Value >> 2) & 0x7ffff;
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate
    if (!isUInt<12>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return Value;
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 2
    if (!isUInt<13>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
    return Value >> 1;
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 4
    if (!isUInt<14>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
    return Value >> 2;
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 8
    if (!isUInt<15>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x7)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
    return Value >> 3;
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate which gets multiplied by 16
    if (!isUInt<16>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0xf)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
    return Value >> 4;
  case AArch64::fixup_aarch64_movw: {
    AArch64MCExpr::VariantKind RefKind =
        static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
    if (AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_ABS &&
        AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_SABS) {
      if (!RefKind) {
        // The fixup is an expression
        if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
          Ctx.reportError(Fixup.getLoc(),
                          "fixup value out of range [-0xFFFF, 0xFFFF]");

        // Invert the negative immediate because it will feed into a MOVN.
        if (SignedValue < 0)
          SignedValue = ~SignedValue;
        Value = static_cast<uint64_t>(SignedValue);
      } else
        // VK_GOTTPREL, VK_TPREL, VK_DTPREL are movw fixups, but they can't
        // ever be resolved in the assembler.
        Ctx.reportError(Fixup.getLoc(),
                        "relocation for a thread-local variable points to an "
                        "absolute symbol");
      return Value;
    }

    if (!IsResolved) {
      // FIXME: Figure out when this can actually happen, and verify our
      // behavior.
      Ctx.reportError(Fixup.getLoc(), "unresolved movw fixup not yet "
                                      "implemented");
      return Value;
    }

    if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
      case AArch64MCExpr::VK_G0:
        break;
      case AArch64MCExpr::VK_G1:
        SignedValue = SignedValue >> 16;
        break;
      case AArch64MCExpr::VK_G2:
        SignedValue = SignedValue >> 32;
        break;
      case AArch64MCExpr::VK_G3:
        SignedValue = SignedValue >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }

    } else {
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
      case AArch64MCExpr::VK_G0:
        break;
      case AArch64MCExpr::VK_G1:
        Value = Value >> 16;
        break;
      case AArch64MCExpr::VK_G2:
        Value = Value >> 32;
        break;
      case AArch64MCExpr::VK_G3:
        Value = Value >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }
    }

    if (RefKind & AArch64MCExpr::VK_NC) {
      Value &= 0xFFFF;
    } else if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
      if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");

      // Invert the negative immediate because it will feed into a MOVN.
      if (SignedValue < 0)
        SignedValue = ~SignedValue;
      Value = static_cast<uint64_t>(SignedValue);
    } else if (Value > 0xFFFF) {
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    }
    return Value;
  }
  case AArch64::fixup_aarch64_pcrel_branch9:
    // Signed 11-bit label (9 bits plus 2 implicit shift bits).
    if (!isInt<11>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0b11)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x1ff;
  case AArch64::fixup_aarch64_pcrel_branch14:
    // Signed 16-bit immediate
    if (!isInt<16>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3fff;
  case AArch64::fixup_aarch64_pcrel_branch16:
    // Unsigned PC-relative offset, aligned to 4, encoded as a negative offset.
    SignedValue = -SignedValue;
    // Check the valid 18-bit unsigned range.
    if (SignedValue < 0 || SignedValue > ((1 << 18) - 1))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0b11)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0xffff;
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved && SignedValue != 0) {
      // MSVC link.exe and lld do not support this relocation type
      // with a non-zero offset.
      Ctx.reportError(Fixup.getLoc(),
                      "cannot perform a PC-relative fixup with a non-zero "
                      "symbol offset");
    }
    // Signed 28-bit immediate
    if (!isInt<28>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded.
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3ffffff;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_SecRel_2:
  case FK_SecRel_4:
    return Value;
  }
}

std::optional<MCFixupKind>
AArch64AsmBackend::getFixupKind(StringRef Name) const {
  if (!TheTriple.isOSBinFormatELF())
    return std::nullopt;

  unsigned Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/AArch64.def"
#undef ELF_RELOC
                      .Case("BFD_RELOC_NONE", ELF::R_AARCH64_NONE)
                      .Case("BFD_RELOC_16", ELF::R_AARCH64_ABS16)
                      .Case("BFD_RELOC_32", ELF::R_AARCH64_ABS32)
                      .Case("BFD_RELOC_64", ELF::R_AARCH64_ABS64)
                      .Default(-1u);
  if (Type == -1u)
    return std::nullopt;
  return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
}

/// getFixupKindContainereSizeInBytes - The number of bytes of the container
/// involved in big endian, or 0 if the item is little endian.
unsigned AArch64AsmBackend::getFixupKindContainereSizeInBytes(unsigned Kind) const {
  if (Endian == llvm::endianness::little)
    return 0;

  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;
  case FK_Data_8:
    return 8;

  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch9:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_pcrel_branch16:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Instructions are always little endian.
    return 0;
  }
}

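// Patch the resolved fixup value into the encoded instruction/data bytes.
// The value is first adjusted per fixup kind, shifted to the target bit
// offset, then OR'd into the fragment (byte-reversed for big-endian
// containers). Absolute movw fixups additionally select MOVZ vs. MOVN by
// flipping bit 30 of the instruction based on the sign of the value.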
void AArch64AsmBackend::applyFixup(const MCAssembler &Asm,
                                   const MCFixup &Fixup, const MCValue &Target,
                                   MutableArrayRef<char> Data, uint64_t Value,
                                   bool IsResolved,
                                   const MCSubtargetInfo *STI) const {
  if (Fixup.getTargetKind() == FK_Data_8 && TheTriple.isOSBinFormatELF()) {
    auto RefKind = static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
    AArch64MCExpr::VariantKind SymLoc = AArch64MCExpr::getSymbolLoc(RefKind);
    // Pack the signing schema of an AUTH/AUTHADDR data fixup into the value:
    // discriminator in bits 32-47, key in bits 60-61, address diversity in
    // bit 63.
    if (SymLoc == AArch64AuthMCExpr::VK_AUTH ||
        SymLoc == AArch64AuthMCExpr::VK_AUTHADDR) {
      assert(Value == 0);
      const auto *Expr = cast<AArch64AuthMCExpr>(Fixup.getValue());
      Value = (uint64_t(Expr->getDiscriminator()) << 32) |
              (uint64_t(Expr->getKey()) << 60) |
              (uint64_t(Expr->hasAddressDiversity()) << 63);
    }
  }

  if (!Value)
    return; // Doesn't change encoding.
  unsigned Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return;
  unsigned NumBytes = getFixupKindNumBytes(Kind);
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
  MCContext &Ctx = Asm.getContext();
  int64_t SignedValue = static_cast<int64_t>(Value);

  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Target, Value, Ctx, TheTriple, IsResolved);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FulleSizeInBytes = getFixupKindContainereSizeInBytes(Fixup.getKind());

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  if (FulleSizeInBytes == 0) {
    // Handle as little-endian.
    for (unsigned i = 0; i != NumBytes; ++i) {
      Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  } else {
    // Handle as big-endian.
    assert((Offset + FulleSizeInBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FulleSizeInBytes && "Invalid fixup size!");
    for (unsigned i = 0; i != NumBytes; ++i) {
      unsigned Idx = FulleSizeInBytes - 1 - i;
      Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  }

  // FIXME: getFixupKindInfo() and getFixupKindNumBytes() could be fixed to
  // handle this more cleanly. This may affect the output of -show-mc-encoding.
  AArch64MCExpr::VariantKind RefKind =
      static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
  if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS ||
      (!RefKind && Fixup.getTargetKind() == AArch64::fixup_aarch64_movw)) {
    // If the immediate is negative, generate MOVN, else MOVZ:
    // bit 30 clear selects MOVN, bit 30 set selects MOVZ.
    if (SignedValue < 0)
      Data[Offset + 3] &= ~(1 << 6);
    else
      Data[Offset + 3] |= (1 << 6);
  }
}

bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                             uint64_t Value) const {
  // Conservative generic check: treat any value that does not fit in a signed
  // 8-bit immediate as needing relaxation. AArch64 does not implement
  // instruction relaxation (see relaxInstruction below).
  return int64_t(Value) != int64_t(int8_t(Value));
}

void AArch64AsmBackend::relaxInstruction(MCInst &Inst,
                                         const MCSubtargetInfo &STI) const {
  llvm_unreachable("AArch64AsmBackend::relaxInstruction() unimplemented");
}

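// The AArch64 NOP encodes as 0xd503201f; any residue that is not a multiple of
// four bytes is padded with zeros before the NOPs are written.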
bool AArch64AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                     const MCSubtargetInfo *STI) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;
  for (uint64_t i = 0; i != Count; ++i)
    OS.write("\x1f\x20\x03\xd5", 4);
  return true;
}

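// Decide whether a fixup must be emitted as a relocation even when its value
// is known at assembly time.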
bool AArch64AsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                              const MCFixup &Fixup,
                                              const MCValue &Target,
                                              const uint64_t Value,
                                              const MCSubtargetInfo *STI) {
  unsigned Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return true;

  // The ADRP instruction adds some multiple of 0x1000 to the current PC &
  // ~0xfff. This means that the required offset to reach a symbol can vary by
  // up to one step depending on where the ADRP is in memory. For example:
  //
  //     ADRP x0, there
  //  there:
  //
  // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
  // we'll need that as an offset. At any other address "there" will be in the
  // same page as the ADRP and the instruction should encode 0x0. Assuming the
  // section isn't 0x1000-aligned, we therefore need to delegate this decision
  // to the linker -- a relocation!
  if (Kind == AArch64::fixup_aarch64_pcrel_adrp_imm21)
    return true;

  return false;
}

namespace {

namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  /// A "frameless" leaf function, where no non-volatile registers are
  /// saved. The return address remains in LR throughout the function.
  UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,

  /// No compact unwind encoding available. Instead the low 23 bits of the
  /// compact unwind encoding are the offset of the DWARF FDE in the
  /// __eh_frame section. This mode is never used in object files; it is only
  /// generated by the linker in final linked images that have only DWARF
  /// unwind info for a function.
  UNWIND_ARM64_MODE_DWARF = 0x03000000,

  /// This is a standard arm64 prologue where FP/LR are immediately pushed on
  /// the stack, then SP is copied to FP. If there are any non-volatile
  /// register saves, they are copied into the stack frame in pairs in a
  /// contiguous range right below the saved FP/LR pair. Any subset of the
  /// five X pairs and four D pairs can be saved, but the memory layout must
  /// be in register number order.
  UNWIND_ARM64_MODE_FRAME = 0x04000000,

  /// Frame register pair encodings.
  UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
  UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
  UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
  UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008,
  UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010,
  UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100,
  UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200,
  UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400,
  UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800
};

} // end CU namespace

// FIXME: This should be in a separate file.
class DarwinAArch64AsmBackend : public AArch64AsmBackend {
  const MCRegisterInfo &MRI;

  /// Encode compact unwind stack adjustment for frameless functions.
  /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
  /// The stack size always needs to be 16-byte aligned.
  uint32_t encodeStackAdjustment(uint32_t StackSize) const {
    return (StackSize / 16) << 12;
  }

public:
  DarwinAArch64AsmBackend(const Target &T, const Triple &TT,
                          const MCRegisterInfo &MRI)
      : AArch64AsmBackend(T, TT, true), MRI(MRI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    uint32_t CPUType = cantFail(MachO::getCPUType(TheTriple));
    uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TheTriple));
    return createAArch64MachObjectWriter(CPUType, CPUSubType,
                                         TheTriple.isArch32Bit());
  }

  /// Generate the compact unwind encoding from the CFI directives.
  uint64_t generateCompactUnwindEncoding(const MCDwarfFrameInfo *FI,
                                         const MCContext *Ctxt) const override {
    ArrayRef<MCCFIInstruction> Instrs = FI->Instructions;
    if (Instrs.empty())
      return CU::UNWIND_ARM64_MODE_FRAMELESS;
    if (!isDarwinCanonicalPersonality(FI->Personality) &&
        !Ctxt->emitCompactUnwindNonCanonical())
      return CU::UNWIND_ARM64_MODE_DWARF;

    bool HasFP = false;
    uint64_t StackSize = 0;

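    // Walk the prologue's CFI instructions and translate them into a compact
    // unwind encoding; any directive or register pairing the compact format
    // cannot express forces a fallback to DWARF unwind info.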
    uint64_t CompactUnwindEncoding = 0;
    int64_t CurOffset = 0;
    for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Cannot handle this directive: bail out.
        return CU::UNWIND_ARM64_MODE_DWARF;
      case MCCFIInstruction::OpDefCfa: {
        // Defines a frame pointer.
        MCRegister XReg =
            getXRegFromWReg(*MRI.getLLVMRegNum(Inst.getRegister(), true));

        // Other CFA registers than FP are not supported by compact unwind.
        // Fall back on DWARF.
        // FIXME: When opt-remarks are supported in MC, add a remark to notify
        // the user.
        if (XReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (i + 2 >= e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        const MCCFIInstruction &LRPush = Instrs[++i];
        if (LRPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        const MCCFIInstruction &FPPush = Instrs[++i];
        if (FPPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (FPPush.getOffset() + 8 != LRPush.getOffset())
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = FPPush.getOffset();

        MCRegister LRReg = *MRI.getLLVMRegNum(LRPush.getRegister(), true);
        MCRegister FPReg = *MRI.getLLVMRegNum(FPPush.getRegister(), true);

        LRReg = getXRegFromWReg(LRReg);
        FPReg = getXRegFromWReg(FPReg);

        if (LRReg != AArch64::LR || FPReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        // Indicate that the function has a frame.
        CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
        HasFP = true;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        if (StackSize != 0)
          return CU::UNWIND_ARM64_MODE_DWARF;
        StackSize = std::abs(Inst.getOffset());
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Registers are saved in pairs. We expect there to be two consecutive
        // `.cfi_offset' instructions with the appropriate registers specified.
        MCRegister Reg1 = *MRI.getLLVMRegNum(Inst.getRegister(), true);
        if (i + 1 == e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (CurOffset != 0 && Inst.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst.getOffset();

        const MCCFIInstruction &Inst2 = Instrs[++i];
        if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        MCRegister Reg2 = *MRI.getLLVMRegNum(Inst2.getRegister(), true);

        if (Inst2.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst2.getOffset();

        // N.B. The encodings must be in register number order, and the X
        // registers before the D registers.

        // X19/X20 pair = 0x00000001,
        // X21/X22 pair = 0x00000002,
        // X23/X24 pair = 0x00000004,
        // X25/X26 pair = 0x00000008,
        // X27/X28 pair = 0x00000010
        Reg1 = getXRegFromWReg(Reg1);
        Reg2 = getXRegFromWReg(Reg2);

        if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
            (CompactUnwindEncoding & 0xF1E) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR;
        else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
                 (CompactUnwindEncoding & 0xF1C) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR;
        else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
                 (CompactUnwindEncoding & 0xF18) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR;
        else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
                 (CompactUnwindEncoding & 0xF10) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR;
        else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
                 (CompactUnwindEncoding & 0xF00) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR;
        else {
          Reg1 = getDRegFromBReg(Reg1);
          Reg2 = getDRegFromBReg(Reg2);

          // D8/D9 pair   = 0x00000100,
          // D10/D11 pair = 0x00000200,
          // D12/D13 pair = 0x00000400,
          // D14/D15 pair = 0x00000800
          if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
              (CompactUnwindEncoding & 0xE00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR;
          else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
                   (CompactUnwindEncoding & 0xC00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR;
          else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
                   (CompactUnwindEncoding & 0x800) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR;
          else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR;
          else
            // Any other register pair cannot be encoded in compact unwind.
            return CU::UNWIND_ARM64_MODE_DWARF;
        }

        break;
      }
      }
    }

    if (!HasFP) {
      // With compact unwind info we can only represent stack adjustments of up
      // to 65520 bytes.
      if (StackSize > 65520)
        return CU::UNWIND_ARM64_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS;
      CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
    }

    return CompactUnwindEncoding;
  }
};

} // end anonymous namespace

namespace {

class ELFAArch64AsmBackend : public AArch64AsmBackend {
public:
  uint8_t OSABI;
  bool IsILP32;

  ELFAArch64AsmBackend(const Target &T, const Triple &TT, uint8_t OSABI,
                       bool IsLittleEndian, bool IsILP32)
      : AArch64AsmBackend(T, TT, IsLittleEndian), OSABI(OSABI),
        IsILP32(IsILP32) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64ELFObjectWriter(OSABI, IsILP32);
  }
};

} // end anonymous namespace

namespace {
class COFFAArch64AsmBackend : public AArch64AsmBackend {
public:
  COFFAArch64AsmBackend(const Target &T, const Triple &TheTriple)
      : AArch64AsmBackend(T, TheTriple, true) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64WinCOFFObjectWriter(TheTriple);
  }
};
} // end anonymous namespace

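// Select the backend flavour from the target triple: Mach-O gets the Darwin
// backend (with compact unwind support), COFF the Windows backend, and
// everything else the ELF backend.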
MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO()) {
    return new DarwinAArch64AsmBackend(T, TheTriple, MRI);
  }

  if (TheTriple.isOSBinFormatCOFF())
    return new COFFAArch64AsmBackend(T, TheTriple);

  assert(TheTriple.isOSBinFormatELF() && "Invalid target");

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, true,
                                  IsILP32);
}

MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  assert(TheTriple.isOSBinFormatELF() &&
         "Big endian is only supported for ELF targets!");
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, false,
                                  IsILP32);
}