LLVM: lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp Source File
1//===- AArch64LegalizerInfo.cpp ----------------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file implements the targeting of the Machinelegalizer class for
11/// AArch64.
12/// \todo This should be generated by TableGen.
13//===----------------------------------------------------------------------===//
29#include "llvm/IR/IntrinsicsAArch64.h"
32#include <initializer_list>
33
34#define DEBUG_TYPE "aarch64-legalinfo"
35
36using namespace llvm;
41
42AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
43 : ST(&ST) {
62
67
68 std::initializer_list<LLT> PackedVectorAllTypeList = {
69 v16s8, v8s16, v4s32,
70 v2s64, v2p0,
71
72
73 v8s8, v4s16, v2s32};
74 std::initializer_list<LLT> ScalarAndPtrTypesList = {s8, s16, s32, s64, p0};
77
78 const TargetMachine &TM = ST.getTargetLowering()->getTargetMachine();
79
80
81 if (!ST.hasNEON() || !ST.hasFPARMv8()) {
83 return;
84 }
85
86
87
88 const bool HasFP16 = ST.hasFullFP16();
89 const LLT &MinFPScalar = HasFP16 ? s16 : s32;
90
91 const bool HasCSSC = ST.hasCSSC();
92 const bool HasRCPC3 = ST.hasRCPC3();
93 const bool HasSVE = ST.hasSVE();
94
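  // The rules below are declarative: each getActionDefinitionsBuilder(...)
  // chain lists the types an opcode is directly selectable for, then says how
  // every other type gets mutated (widened, narrowed, scalarized, lowered or
  // turned into a libcall) until it reaches a legal form. As an illustrative
  // sketch only (not one of the real rules below), a chain such as
  //   getActionDefinitionsBuilder(G_ADD)
  //       .legalFor({s32, s64, v4s32})   // directly selectable types
  //       .widenScalarToNextPow2(0)      // round odd widths up
  //       .clampScalar(0, s32, s64);     // keep scalars in the legal range
  // reads top to bottom; the first matching rule wins.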
96 {G_IMPLICIT_DEF, G_FREEZE, G_CONSTANT_FOLD_BARRIER})
97 .legalFor({p0, s8, s16, s32, s64})
98 .legalFor({v2s8, v4s8, v8s8, v16s8, v2s16, v4s16, v8s16, v2s32, v4s32,
99 v2s64, v2p0})
100 .widenScalarToNextPow2(0)
110
112 .legalFor({p0, s16, s32, s64})
113 .legalFor(PackedVectorAllTypeList)
123
127 .widenScalarToNextPow2(0)
132 .maxScalarIf(typeInSet(0, {s64, p0}), 1, s32);
133
137 .widenScalarToNextPow2(1)
142 .maxScalarIf(typeInSet(1, {s64, p0}), 0, s32)
143 .maxScalarIf(typeInSet(1, {s128}), 0, s64);
144
146 .legalFor({s32, s64, v8s8, v16s8, v4s16, v8s16, v2s32, v4s32, v2s64})
147 .legalFor(HasSVE, {nxv16s8, nxv8s16, nxv4s32, nxv2s64})
148 .widenScalarToNextPow2(0)
156 return Query.Types[0].getNumElements() <= 2;
157 },
158 0, s32)
159 .minScalarOrEltIf(
161 return Query.Types[0].getNumElements() <= 4;
162 },
163 0, s16)
164 .minScalarOrEltIf(
166 return Query.Types[0].getNumElements() <= 16;
167 },
168 0, s8)
171
173 .legalFor({s32, s64, v8s8, v16s8, v4s16, v8s16, v2s32, v4s32, v2s64})
174 .widenScalarToNextPow2(0)
182 return Query.Types[0].getNumElements() <= 2;
183 },
184 0, s32)
185 .minScalarOrEltIf(
187 return Query.Types[0].getNumElements() <= 4;
188 },
189 0, s16)
190 .minScalarOrEltIf(
192 return Query.Types[0].getNumElements() <= 16;
193 },
194 0, s8)
197
200 const auto &SrcTy = Query.Types[0];
201 const auto &AmtTy = Query.Types[1];
202 return !SrcTy.isVector() && SrcTy.getSizeInBits() == 32 &&
203 AmtTy.getSizeInBits() == 32;
204 })
205 .legalFor({
206 {s32, s32},
207 {s32, s64},
208 {s64, s64},
209 {v8s8, v8s8},
210 {v16s8, v16s8},
211 {v4s16, v4s16},
212 {v8s16, v8s16},
213 {v2s32, v2s32},
214 {v4s32, v4s32},
215 {v2s64, v2s64},
216 })
217 .widenScalarToNextPow2(0)
229
231 .legalFor({{p0, s64}, {v2p0, v2s64}})
232 .clampScalarOrElt(1, s64, s64)
234
236
238 .legalFor({s32, s64})
239 .libcallFor({s128})
240 .clampScalar(0, s32, s64)
243
245 .lowerFor({s8, s16, s32, s64, v2s32, v4s32, v2s64})
246 .libcallFor({s128})
252
254 .widenScalarToNextPow2(0, 32)
257
259 .legalFor({s64, v16s8, v8s16, v4s32})
260 .lower();
261
263 .legalFor({v8s8, v16s8, v4s16, v8s16, v2s32, v4s32})
264 .legalFor(HasCSSC, {s32, s64})
265 .minScalar(HasCSSC, 0, s32)
270
271
273 .legalFor(HasCSSC, {s32, s64})
274 .legalFor(PackedVectorAllTypeList)
276
278 return SrcTy.isScalar() && SrcTy.getSizeInBits() < 128;
279 })
280 .widenScalarIf(
282 [=](const LegalityQuery &Query) { return std::make_pair(0, v4s16); })
283 .widenScalarIf(
285 [=](const LegalityQuery &Query) { return std::make_pair(0, v2s32); })
286 .clampNumElements(0, v8s8, v16s8)
292
294 {G_ABDS, G_ABDU, G_UAVGFLOOR, G_UAVGCEIL, G_SAVGFLOOR, G_SAVGCEIL})
295 .legalFor({v8s8, v16s8, v4s16, v8s16, v2s32, v4s32})
296 .lower();
297
299 {G_SADDE, G_SSUBE, G_UADDE, G_USUBE, G_SADDO, G_SSUBO, G_UADDO, G_USUBO})
300 .legalFor({{s32, s32}, {s64, s32}})
301 .clampScalar(0, s32, s64)
304
306 .customFor({{s32, s32}, {s32, s64}, {s64, s64}})
307 .lower();
308
310 .legalFor({{s32, s64}, {s64, s64}})
312 return Q.Types[0].isScalar() && Q.Types[1].getScalarSizeInBits() < 64;
313 })
314 .lower();
316
318 .customFor({{s32, s32}, {s64, s64}});
319
320 auto always = [=](const LegalityQuery &Q) { return true; };
322 .legalFor(HasCSSC, {{s32, s32}, {s64, s64}})
323 .legalFor({{v8s8, v8s8}, {v16s8, v16s8}})
324 .customFor(!HasCSSC, {{s32, s32}, {s64, s64}})
325 .customFor({{s128, s128},
326 {v4s16, v4s16},
327 {v8s16, v8s16},
328 {v2s32, v2s32},
329 {v4s32, v4s32},
330 {v2s64, v2s64}})
331 .clampScalar(0, s32, s128)
341
344 {s64, s64},
345 {v8s8, v8s8},
346 {v16s8, v16s8},
347 {v4s16, v4s16},
348 {v8s16, v8s16},
349 {v2s32, v2s32},
350 {v4s32, v4s32}})
351 .widenScalarToNextPow2(1, 32)
359
361
367 .legalFor(HasCSSC, {s32, s64})
368 .customFor(!HasCSSC, {s32, s64});
369
371
373 .legalFor({s32, s64, v8s8, v16s8})
374 .widenScalarToNextPow2(0, 32)
384
386 .legalFor({s32, s64, v4s16, v8s16, v2s32, v4s32, v2s64})
393
395 .legalFor({v8s8, v16s8, v4s16, v8s16, v2s32, v4s32, v2s64})
396 .legalFor(HasSVE, {nxv16s8, nxv8s16, nxv4s32, nxv2s64})
397 .clampNumElements(0, v8s8, v16s8)
404
406 {G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMA, G_FSQRT, G_FMAXNUM, G_FMINNUM,
407 G_FMAXIMUM, G_FMINIMUM, G_FCEIL, G_FFLOOR, G_FRINT, G_FNEARBYINT,
408 G_INTRINSIC_TRUNC, G_INTRINSIC_ROUND, G_INTRINSIC_ROUNDEVEN})
409 .legalFor({s32, s64, v2s32, v4s32, v2s64})
410 .legalFor(HasFP16, {s16, v4s16, v8s16})
411 .libcallFor({s128})
418
420 .legalFor({s32, s64, v2s32, v4s32, v2s64})
421 .legalFor(HasFP16, {s16, v4s16, v8s16})
428 .lowerFor({s16, v4s16, v8s16});
429
432 .minScalar(0, s32)
434
436 G_FLOG10, G_FTAN, G_FEXP, G_FEXP2, G_FEXP10,
437 G_FACOS, G_FASIN, G_FATAN, G_FATAN2, G_FCOSH,
438 G_FSINH, G_FTANH, G_FMODF})
439
441
447 .libcallFor({{s32, s32}, {s64, s32}, {s128, s32}});
448
450 .legalFor({{s32, s32}, {s32, s64}, {s64, s32}, {s64, s64}})
451 .legalFor(HasFP16, {{s32, s16}, {s64, s16}})
452 .minScalar(1, s32)
455 .legalFor({{s64, s32}, {s64, s64}})
456 .legalFor(HasFP16, {{s64, s16}})
457 .minScalar(0, s64)
460
461
464 [](const LegalityQuery &Query) { return Query.Types[0].isScalar(); },
466 const LLT Ty = Query.Types[0];
468 })
469 .lower();
470
472
473 for (unsigned Op : {G_SEXTLOAD, G_ZEXTLOAD}) {
475
476 if (Op == G_SEXTLOAD)
478
479
480 Actions
481 .legalForTypesWithMemDesc({{s32, p0, s8, 8},
482 {s32, p0, s16, 8},
483 {s32, p0, s32, 8},
484 {s64, p0, s8, 2},
485 {s64, p0, s16, 2},
486 {s64, p0, s32, 4},
487 {s64, p0, s64, 8},
488 {p0, p0, s64, 8},
489 {v2s32, p0, s64, 8}})
490 .widenScalarToNextPow2(0)
491 .clampScalar(0, s32, s64)
492
493
494 .unsupportedIfMemSizeNotPow2()
495
496 .lower();
497 }
498
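  // Loads: register-sized scalar and vector loads (plus scalable-vector loads
  // when SVE is present) are legal when the memory type matches the register
  // type; over-wide scalars, extending forms and pointer-vector loads are
  // narrowed, lowered or custom-handled by the rules below.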
499 auto IsPtrVecPred = [=](const LegalityQuery &Query) {
500 const LLT &ValTy = Query.Types[0];
501 return ValTy.isPointerVector() && ValTy.getAddressSpace() == 0;
502 };
503
506 return HasRCPC3 && Query.Types[0] == s128 &&
508 })
510 return Query.Types[0] == s128 &&
512 })
513 .legalForTypesWithMemDesc({{s8, p0, s8, 8},
514 {s16, p0, s16, 8},
515 {s32, p0, s32, 8},
516 {s64, p0, s64, 8},
517 {p0, p0, s64, 8},
518 {s128, p0, s128, 8},
519 {v8s8, p0, s64, 8},
520 {v16s8, p0, s128, 8},
521 {v4s16, p0, s64, 8},
522 {v8s16, p0, s128, 8},
523 {v2s32, p0, s64, 8},
524 {v4s32, p0, s128, 8},
525 {v2s64, p0, s128, 8}})
526
527 .legalForTypesWithMemDesc(
528 {{s32, p0, s8, 8}, {s32, p0, s16, 8}, {s64, p0, s32, 8}})
529 .legalForTypesWithMemDesc({
530
531 {nxv16s8, p0, nxv16s8, 8},
532 {nxv8s16, p0, nxv8s16, 8},
533 {nxv4s32, p0, nxv4s32, 8},
534 {nxv2s64, p0, nxv2s64, 8},
535 })
536 .widenScalarToNextPow2(0, 8)
546
547 return Query.Types[0].isScalar() &&
549 Query.Types[0].getSizeInBits() > 32;
550 },
552
555 const LLT VecTy = Query.Types[0];
557 })
558 .customIf(IsPtrVecPred)
561
564 return HasRCPC3 && Query.Types[0] == s128 &&
566 })
568 return Query.Types[0] == s128 &&
570 })
571 .legalForTypesWithMemDesc(
572 {{s8, p0, s8, 8}, {s16, p0, s8, 8},
573 {s32, p0, s8, 8},
574 {s64, p0, s8, 8},
575 {s16, p0, s16, 8}, {s32, p0, s16, 8},
576 {s64, p0, s16, 8},
577 {s32, p0, s8, 8}, {s32, p0, s16, 8}, {s32, p0, s32, 8},
578 {s64, p0, s64, 8}, {s64, p0, s32, 8},
579 {p0, p0, s64, 8}, {s128, p0, s128, 8}, {v16s8, p0, s128, 8},
580 {v8s8, p0, s64, 8}, {v4s16, p0, s64, 8}, {v8s16, p0, s128, 8},
581 {v2s32, p0, s64, 8}, {v4s32, p0, s128, 8}, {v2s64, p0, s128, 8}})
582 .legalForTypesWithMemDesc({
583
584
585
586
587 {nxv16s8, p0, nxv16s8, 8},
588 {nxv8s16, p0, nxv8s16, 8},
589 {nxv4s32, p0, nxv4s32, 8},
590 {nxv2s64, p0, nxv2s64, 8},
591 })
592 .clampScalar(0, s8, s64)
595 return Query.Types[0].isScalar() &&
597 })
598
599 .clampMaxNumElements(0, s8, 16)
605
608 return Query.Types[0].getSizeInBits() ==
609 Query.MMODescrs[0].MemoryTy.getSizeInBits();
610 })),
612 const LLT VecTy = Query.Types[0];
614 })
615 .customIf(IsPtrVecPred)
619
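  // Pre/post-indexed loads and stores: only p0 base pointers with the
  // register-sized value types listed below are legal, and extending indexed
  // loads are accepted only when the memory type is strictly narrower than
  // the loaded value.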
621
622
623
625 {p0, s8, s8, 8},
626 {p0, s16, s16, 8},
627 {p0, s32, s8, 8},
628 {p0, s32, s16, 8},
629 {p0, s32, s32, 8},
630 {p0, s64, s64, 8},
631 {p0, p0, p0, 8},
632 {p0, v8s8, v8s8, 8},
633 {p0, v16s8, v16s8, 8},
634 {p0, v4s16, v4s16, 8},
635 {p0, v8s16, v8s16, 8},
636 {p0, v2s32, v2s32, 8},
637 {p0, v4s32, v4s32, 8},
638 {p0, v2s64, v2s64, 8},
639 {p0, v2p0, v2p0, 8},
640 {p0, s128, s128, 8},
641 })
643
644 auto IndexedLoadBasicPred = [=](const LegalityQuery &Query) {
649 return false;
650 if (PtrTy != p0)
651 return false;
652 return true;
653 };
657 .legalIf(IndexedLoadBasicPred)
660 .unsupportedIf(
667 if (PtrTy != p0)
668 return false;
669 if (LdTy == s16)
670 return MemTy == s8;
671 if (LdTy == s32)
672 return MemTy == s8 || MemTy == s16;
673 if (LdTy == s64)
674 return MemTy == s8 || MemTy == s16 || MemTy == s32;
675 return false;
676 })))
678
679
681 .legalFor({p0, s8, s16, s32, s64})
682 .widenScalarToNextPow2(0)
685
686 .legalFor({s16, s32, s64, s128})
687 .clampScalar(0, MinFPScalar, s128);
688
689
691 .legalFor({{s32, s32}, {s32, s64}, {s32, p0}})
698 const LLT &Ty = Query.Types[0];
699 const LLT &SrcTy = Query.Types[1];
700 return Ty.isVector() && !SrcTy.isPointerVector() &&
701 Ty.getElementType() != SrcTy.getElementType();
702 },
703 0, 1)
704 .minScalarOrEltIf(
706 1, s32)
707 .minScalarOrEltIf(
709 return Query.Types[1].isPointerVector();
710 },
711 0, s64)
719
722 {s32, s64},
723 {v4s32, v4s32},
724 {v2s32, v2s32},
725 {v2s64, v2s64}})
726 .legalFor(HasFP16, {{s32, s16}, {v4s16, v4s16}, {v8s16, v8s16}})
733 const LLT &Ty = Query.Types[0];
734 const LLT &SrcTy = Query.Types[1];
735 return Ty.isVector() && !SrcTy.isPointerVector() &&
736 Ty.getElementType() != SrcTy.getElementType();
737 },
738 0, 1)
739 .clampNumElements(1, v4s16, v8s16)
744
745
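  // Scalar extends are legal whenever source and destination both fit in a
  // general-purpose register; vector extends and s128 destinations are
  // covered by the explicit rules below.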
746 auto ExtLegalFunc = [=](const LegalityQuery &Query) {
747 unsigned DstSize = Query.Types[0].getSizeInBits();
748
749
750 if (Query.Types[0].isVector())
751 return false;
752
753 if (DstSize < 8 || DstSize >= 128 || !isPowerOf2_32(DstSize))
754 return false;
755
756 const LLT &SrcTy = Query.Types[1];
757
758
759
760
761 unsigned SrcSize = SrcTy.getSizeInBits();
763 return false;
764
765 return true;
766 };
768 .legalIf(ExtLegalFunc)
769 .legalFor({{v8s16, v8s8}, {v4s32, v4s16}, {v2s64, v2s32}})
770 .clampScalar(0, s64, s64)
775
777 return (Query.Types[0].getScalarSizeInBits() >
778 Query.Types[1].getScalarSizeInBits() * 2) &&
779 Query.Types[0].isVector() &&
780 (Query.Types[1].getScalarSizeInBits() == 8 ||
781 Query.Types[1].getScalarSizeInBits() == 16);
782 })
783 .clampMinNumElements(1, s8, 8)
786
788 .legalFor({{v8s8, v8s16}, {v4s16, v4s32}, {v2s32, v2s64}})
794 [=](const LegalityQuery &Query) { return Query.Types[0].isVector(); },
795 0, s8)
799 return DstTy.isVector() && SrcTy.getSizeInBits() > 128 &&
801 })
802 .clampMinNumElements(0, s8, 8)
805
807 .legalFor({{v8s8, v8s16}, {v4s16, v4s32}, {v2s32, v2s64}});
808
811 .legalFor(PackedVectorAllTypeList)
818
819
822 {{s16, s32}, {s16, s64}, {s32, s64}, {v4s16, v4s32}, {v2s32, v2s64}})
823 .libcallFor({{s16, s128}, {s32, s128}, {s64, s128}})
828 return SrcTy.isFixedVector() && DstTy.isFixedVector() &&
829 SrcTy.getScalarSizeInBits() == 64 &&
831 })
832
833 .clampNumElements(1, v4s32, v4s32)
836
839 {{s32, s16}, {s64, s16}, {s64, s32}, {v4s32, v4s16}, {v2s64, v2s32}})
840 .libcallFor({{s128, s64}, {s128, s32}, {s128, s16}})
846 return SrcTy.isVector() && DstTy.isVector() &&
847 SrcTy.getScalarSizeInBits() == 16 &&
849 },
851 .clampNumElements(0, v4s32, v4s32)
854
855
857 .legalFor({{s32, s32},
858 {s64, s32},
859 {s32, s64},
860 {s64, s64},
861 {v2s32, v2s32},
862 {v4s32, v4s32},
863 {v2s64, v2s64}})
864 .legalFor(HasFP16,
865 {{s32, s16}, {s64, s16}, {v4s16, v4s16}, {v8s16, v8s16}})
868
869
872 return Query.Types[1] == s16 && Query.Types[0].getSizeInBits() > 64;
873 },
881 return Query.Types[0].getScalarSizeInBits() <= 64 &&
882 Query.Types[0].getScalarSizeInBits() >
883 Query.Types[1].getScalarSizeInBits();
884 },
886 .widenScalarIf(
888 return Query.Types[1].getScalarSizeInBits() <= 64 &&
889 Query.Types[0].getScalarSizeInBits() <
890 Query.Types[1].getScalarSizeInBits();
891 },
893 .clampNumElements(0, v4s16, v8s16)
897 {{s32, s128}, {s64, s128}, {s128, s128}, {s128, s32}, {s128, s64}});
898
900 .legalFor({{s32, s32},
901 {s64, s32},
902 {s32, s64},
903 {s64, s64},
904 {v2s32, v2s32},
905 {v4s32, v4s32},
906 {v2s64, v2s64}})
907 .legalFor(
908 HasFP16,
909 {{s16, s16}, {s32, s16}, {s64, s16}, {v4s16, v4s16}, {v8s16, v8s16}})
910
913
914
917 return Query.Types[1] == s16 && Query.Types[0].getSizeInBits() > 64;
918 },
927 unsigned ITySize = Query.Types[0].getScalarSizeInBits();
928 return (ITySize == 16 || ITySize == 32 || ITySize == 64) &&
929 ITySize > Query.Types[1].getScalarSizeInBits();
930 },
932 .widenScalarIf(
934 unsigned FTySize = Query.Types[1].getScalarSizeInBits();
935 return (FTySize == 16 || FTySize == 32 || FTySize == 64) &&
936 Query.Types[0].getScalarSizeInBits() < FTySize;
937 },
943
945 .legalFor({{s32, s32},
946 {s64, s32},
947 {s32, s64},
948 {s64, s64},
949 {v2s32, v2s32},
950 {v4s32, v4s32},
951 {v2s64, v2s64}})
952 .legalFor(HasFP16,
953 {{s16, s32}, {s16, s64}, {v4s16, v4s16}, {v8s16, v8s16}})
960 return Query.Types[1].isVector() &&
961 Query.Types[1].getScalarSizeInBits() == 64 &&
962 Query.Types[0].getScalarSizeInBits() == 16;
963 })
964 .widenScalarOrEltToNextPow2OrMinSize(0, HasFP16 ? 16 : 32)
966
968 return Query.Types[0].getScalarSizeInBits() == 32 &&
969 Query.Types[1].getScalarSizeInBits() == 64;
970 },
971 0)
972 .widenScalarIf(
974 return Query.Types[1].getScalarSizeInBits() <= 64 &&
975 Query.Types[0].getScalarSizeInBits() <
976 Query.Types[1].getScalarSizeInBits();
977 },
979 .widenScalarIf(
981 return Query.Types[0].getScalarSizeInBits() <= 64 &&
982 Query.Types[0].getScalarSizeInBits() >
983 Query.Types[1].getScalarSizeInBits();
984 },
986 .clampNumElements(0, v4s16, v8s16)
990 {s32, s128},
991 {s64, s128},
992 {s128, s128},
993 {s128, s32},
994 {s128, s64}});
995
996
1000 .clampScalar(0, s32, s32);
1002
1004 .legalFor({{s32, s32}, {s64, s32}, {p0, s32}})
1005 .widenScalarToNextPow2(0)
1011
1012
1014
1017 else
1019
1022
1024 .legalFor({{s64, p0}, {v2s64, v2p0}})
1025 .widenScalarToNextPow2(0, 64)
1028
1031 return Query.Types[0].getSizeInBits() != Query.Types[1].getSizeInBits();
1032 })
1033 .legalFor({{p0, s64}, {v2p0, v2s64}})
1034 .clampMaxNumElements(1, s64, 2);
1035
1036
1037
1039
1041 .legalForCartesianProduct({s64, v8s8, v4s16, v2s32})
1042 .legalForCartesianProduct({s128, v16s8, v8s16, v4s32, v2s64, v2p0})
1044
1047 return DstTy.isScalar() && SrcTy.isVector() &&
1048 SrcTy.getScalarSizeInBits() == 1;
1049 })
1051 return Query.Types[0].isVector() != Query.Types[1].isVector();
1052 })
1058
1060
1061
1062
1065 .clampScalar(0, s8, s64)
1067
1071
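  // Atomics: the native 32/64-bit forms stay legal (128-bit cmpxchg is
  // custom-lowered); with outline-atomics and no LSE the operations become
  // libcalls to the __aarch64_* helper routines instead.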
1072 bool UseOutlineAtomics = ST.outlineAtomics() && !ST.hasLSE();
1073
1075 .legalFor(!UseOutlineAtomics, {{s32, p0}, {s64, p0}})
1076 .customFor(!UseOutlineAtomics, {{s128, p0}})
1077 .libcallFor(UseOutlineAtomics,
1078 {{s8, p0}, {s16, p0}, {s32, p0}, {s64, p0}, {s128, p0}})
1079 .clampScalar(0, s32, s64);
1080
1082 G_ATOMICRMW_SUB, G_ATOMICRMW_AND, G_ATOMICRMW_OR,
1083 G_ATOMICRMW_XOR})
1084 .legalFor(!UseOutlineAtomics, {{s32, p0}, {s64, p0}})
1085 .libcallFor(UseOutlineAtomics,
1086 {{s8, p0}, {s16, p0}, {s32, p0}, {s64, p0}})
1087 .clampScalar(0, s32, s64);
1088
1089
1090
1092 {G_ATOMICRMW_MIN, G_ATOMICRMW_MAX, G_ATOMICRMW_UMIN, G_ATOMICRMW_UMAX})
1094 .clampScalar(0, s32, s64);
1095
1097
1098
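  // G_MERGE_VALUES / G_UNMERGE_VALUES: only power-of-two pieces of 8..64 bits
  // combined into (or split out of) 32/64/128-bit values are handled
  // directly here.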
1099 for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
1100 unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
1101 unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
1108 switch (Q.Types[BigTyIdx].getSizeInBits()) {
1109 case 32:
1110 case 64:
1111 case 128:
1112 break;
1113 default:
1114 return false;
1115 }
1116 switch (Q.Types[LitTyIdx].getSizeInBits()) {
1117 case 8:
1118 case 16:
1119 case 32:
1120 case 64:
1121 return true;
1122 default:
1123 return false;
1124 }
1125 });
1126 }
1127
1128
1130 .legalFor(HasSVE, {{s16, nxv16s8, s64},
1131 {s16, nxv8s16, s64},
1132 {s32, nxv4s32, s64},
1133 {s64, nxv2s64, s64}})
1134 .unsupportedIf([=](const LegalityQuery &Query) {
1135 const LLT &EltTy = Query.Types[1].getElementType();
1136 if (Query.Types[1].isScalableVector())
1137 return false;
1138 return Query.Types[0] != EltTy;
1139 })
1140 .minScalar(2, s64)
1142 const LLT &VecTy = Query.Types[1];
1143 return VecTy == v8s8 || VecTy == v16s8 || VecTy == v2s16 ||
1144 VecTy == v4s16 || VecTy == v8s16 || VecTy == v2s32 ||
1145 VecTy == v4s32 || VecTy == v2s64 || VecTy == v2p0;
1146 })
1147 .minScalarOrEltIf(
1149
1150
1151 return Query.Types[1].isFixedVector() &&
1152 Query.Types[1].getNumElements() <= 2;
1153 },
1154 0, s64)
1155 .minScalarOrEltIf(
1157 return Query.Types[1].isFixedVector() &&
1158 Query.Types[1].getNumElements() <= 4;
1159 },
1160 0, s32)
1161 .minScalarOrEltIf(
1163 return Query.Types[1].isFixedVector() &&
1164 Query.Types[1].getNumElements() <= 8;
1165 },
1166 0, s16)
1167 .minScalarOrEltIf(
1169 return Query.Types[1].isFixedVector() &&
1170 Query.Types[1].getNumElements() <= 16;
1171 },
1172 0, s8)
1173 .minScalarOrElt(0, s8)
1181
1184 typeInSet(0, {v8s8, v16s8, v4s16, v8s16, v2s32, v4s32, v2s64, v2p0}))
1185 .legalFor(HasSVE, {{nxv16s8, s32, s64},
1186 {nxv8s16, s32, s64},
1187 {nxv4s32, s32, s64},
1188 {nxv2s64, s64, s64}})
1197
1200 {v16s8, s8},
1201 {v4s16, s16},
1202 {v8s16, s16},
1203 {v2s32, s32},
1204 {v4s32, s32},
1205 {v2s64, s64},
1206 {v2p0, p0}})
1207 .clampNumElements(0, v4s32, v4s32)
1213
1215
1218 const LLT &DstTy = Query.Types[0];
1219 const LLT &SrcTy = Query.Types[1];
1220
1221
1222 if (DstTy != SrcTy)
1223 return false;
1225 {v8s8, v16s8, v4s16, v8s16, v2s32, v4s32, v2s64}, DstTy);
1226 })
1227 .moreElementsIf(
1229 return Query.Types[0].getNumElements() >
1230 Query.Types[1].getNumElements();
1231 },
1236 return Query.Types[0].getNumElements() <
1237 Query.Types[1].getNumElements();
1238 },
1240 .widenScalarOrEltToNextPow2OrMinSize(0, 8)
1247
1248 const LLT DstTy = Query.Types[0];
1250 });
1251
1253 .legalFor({{v16s8, v8s8}, {v8s16, v4s16}, {v4s32, v2s32}})
1254 .bitcastIf(
1256 return Query.Types[0].isFixedVector() &&
1257 Query.Types[1].isFixedVector() &&
1258 Query.Types[0].getSizeInBits() <= 128 &&
1259 Query.Types[1].getSizeInBits() <= 64;
1260 },
1262 const LLT DstTy = Query.Types[0];
1263 const LLT SrcTy = Query.Types[1];
1264 return std::pair(
1268 SrcTy.getNumElements())));
1269 });
1270
1272 .legalFor({{v8s8, v16s8}, {v4s16, v8s16}, {v2s32, v4s32}})
1274 .immIdx(0);
1275
1276
1278 .legalFor(HasSVE, {{nxv4s32, s32}, {nxv2s64, s64}});
1279
1281
1283
1285
1287
1289
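  // Memory intrinsics: with FEAT_MOPS, G_MEMSET stays custom and
  // G_MEMCPY/G_MEMMOVE stay legal so the MOPS SET*/CPY* instructions can be
  // selected; without it they are left to the usual libcall expansion.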
1290 if (ST.hasMOPS()) {
1291
1292
1294
1297 .customForCartesianProduct({p0}, {s8}, {s64})
1298 .immIdx(0);
1299
1301 .legalForCartesianProduct({p0}, {p0}, {s64})
1302 .immIdx(0);
1303
1304
1307
1308 } else {
1311 }
1312
1313
1314
1315
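  // Vector reductions: FADD and integer ADD reductions are legal on the
  // common NEON types below; the min/max and logical reductions are split
  // into smaller vectors or scalarized by their respective rules.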
1317 .legalFor({{s32, v2s32}, {s32, v4s32}, {s64, v2s64}})
1318 .legalFor(HasFP16, {{s16, v4s16}, {s16, v8s16}})
1319 .minScalarOrElt(0, MinFPScalar)
1326
1327
1328
1329
1339
1343
1346 {s8, v16s8},
1347 {s16, v4s16},
1348 {s16, v8s16},
1349 {s32, v2s32},
1350 {s32, v4s32},
1351 {s64, v2s64}})
1359
1361 G_VECREDUCE_FMINIMUM, G_VECREDUCE_FMAXIMUM})
1362 .legalFor({{s32, v2s32}, {s32, v4s32}, {s64, v2s64}})
1363 .legalFor(HasFP16, {{s16, v4s16}, {s16, v8s16}})
1364 .minScalarOrElt(0, MinFPScalar)
1370
1377
1379 {G_VECREDUCE_SMIN, G_VECREDUCE_SMAX, G_VECREDUCE_UMIN, G_VECREDUCE_UMAX})
1380 .legalFor({{s8, v8s8},
1381 {s8, v16s8},
1382 {s16, v4s16},
1383 {s16, v8s16},
1384 {s32, v2s32},
1385 {s32, v4s32}})
1386 .moreElementsIf(
1388 return Query.Types[1].isVector() &&
1389 Query.Types[1].getElementType() != s8 &&
1390 Query.Types[1].getNumElements() & 1;
1391 },
1393 .clampMaxNumElements(1, s64, 2)
1399
1401 {G_VECREDUCE_OR, G_VECREDUCE_AND, G_VECREDUCE_XOR})
1402
1403
1404
1405 .fewerElementsIf(
1408 if (SrcTy.isScalar())
1409 return false;
1411 return false;
1412
1413 return SrcTy.getSizeInBits() > 64;
1414 },
1417 return std::make_pair(1, SrcTy.divide(2));
1418 })
1421
1422
1424
1425
1427 G_GET_FPMODE, G_SET_FPMODE, G_RESET_FPMODE})
1429
1431
1433
1435
1437 verify(*ST.getInstrInfo());
1438}
1439
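// legalizeCustom() dispatches every operation the rules above marked as
// custom to the matching helper defined later in this file.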
1440bool AArch64LegalizerInfo::legalizeCustom(
1441 LegalizerHelper &Helper, MachineInstr &MI,
1442 LostDebugLocObserver &LocObserver) const {
1443 MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
1444 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
1445 GISelChangeObserver &Observer = Helper.Observer;
1446 switch (MI.getOpcode()) {
1447 default:
1448
1449 return false;
1450 case TargetOpcode::G_VAARG:
1451 return legalizeVaArg(MI, MRI, MIRBuilder);
1452 case TargetOpcode::G_LOAD:
1453 case TargetOpcode::G_STORE:
1454 return legalizeLoadStore(MI, MRI, MIRBuilder, Observer);
1455 case TargetOpcode::G_SHL:
1456 case TargetOpcode::G_ASHR:
1457 case TargetOpcode::G_LSHR:
1458 return legalizeShlAshrLshr(MI, MRI, MIRBuilder, Observer);
1459 case TargetOpcode::G_GLOBAL_VALUE:
1460 return legalizeSmallCMGlobalValue(MI, MRI, MIRBuilder, Observer);
1461 case TargetOpcode::G_SBFX:
1462 case TargetOpcode::G_UBFX:
1463 return legalizeBitfieldExtract(MI, MRI, Helper);
1464 case TargetOpcode::G_FSHL:
1465 case TargetOpcode::G_FSHR:
1466 return legalizeFunnelShift(MI, MRI, MIRBuilder, Observer, Helper);
1467 case TargetOpcode::G_ROTR:
1468 return legalizeRotate(MI, MRI, Helper);
1469 case TargetOpcode::G_CTPOP:
1470 return legalizeCTPOP(MI, MRI, Helper);
1471 case TargetOpcode::G_ATOMIC_CMPXCHG:
1472 return legalizeAtomicCmpxchg128(MI, MRI, Helper);
1473 case TargetOpcode::G_CTTZ:
1474 return legalizeCTTZ(MI, Helper);
1475 case TargetOpcode::G_BZERO:
1476 case TargetOpcode::G_MEMCPY:
1477 case TargetOpcode::G_MEMMOVE:
1478 case TargetOpcode::G_MEMSET:
1479 return legalizeMemOps(MI, Helper);
1480 case TargetOpcode::G_EXTRACT_VECTOR_ELT:
1481 return legalizeExtractVectorElt(MI, MRI, Helper);
1482 case TargetOpcode::G_DYN_STACKALLOC:
1483 return legalizeDynStackAlloc(MI, Helper);
1484 case TargetOpcode::G_PREFETCH:
1485 return legalizePrefetch(MI, Helper);
1486 case TargetOpcode::G_ABS:
1488 case TargetOpcode::G_ICMP:
1489 return legalizeICMP(MI, MRI, MIRBuilder);
1490 case TargetOpcode::G_BITCAST:
1491 return legalizeBitcast(MI, Helper);
1492 case TargetOpcode::G_FPTRUNC:
1493
1494
1495 return legalizeFptrunc(MI, MIRBuilder, MRI);
1496 }
1497
1499}
1500
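// G_BITCAST is only custom-handled for casting a vector of s1 into a scalar;
// all other bitcasts are left to the generic rules.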
1501bool AArch64LegalizerInfo::legalizeBitcast(MachineInstr &MI,
1503 assert(MI.getOpcode() == TargetOpcode::G_BITCAST && "Unexpected opcode");
1504 auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
1505
1506
1507 if (!DstTy.isScalar() || !SrcTy.isVector() ||
1508 SrcTy.getElementType() != LLT::scalar(1))
1509 return false;
1510
1512 MI.eraseFromParent();
1513 return true;
1514}
1515
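// Funnel shifts with a constant amount are canonicalized to G_FSHR with a
// 64-bit amount (a G_FSHL becomes G_FSHR with the complemented amount) so
// instruction selection can use the immediate forms.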
1516bool AArch64LegalizerInfo::legalizeFunnelShift(MachineInstr &MI,
1521 assert(MI.getOpcode() == TargetOpcode::G_FSHL ||
1522 MI.getOpcode() == TargetOpcode::G_FSHR);
1523
1524
1525
1526 Register ShiftNo = MI.getOperand(3).getReg();
1527 LLT ShiftTy = MRI.getType(ShiftNo);
1529
1530
1531
1532 LLT OperationTy = MRI.getType(MI.getOperand(0).getReg());
1534
1535
1536 if (!VRegAndVal || VRegAndVal->Value.urem(BitWidth) == 0)
1539
1541
1542 Amount = MI.getOpcode() == TargetOpcode::G_FSHL ? BitWidth - Amount : Amount;
1543
1544
1545
1546 if (ShiftTy.getSizeInBits() == 64 && MI.getOpcode() == TargetOpcode::G_FSHR &&
1547 VRegAndVal->Value.ult(BitWidth))
1548 return true;
1549
1550
1552
1553 if (MI.getOpcode() == TargetOpcode::G_FSHR) {
1555 MI.getOperand(3).setReg(Cast64.getReg(0));
1557 }
1558
1559
1560 else if (MI.getOpcode() == TargetOpcode::G_FSHL) {
1561 MIRBuilder.buildInstr(TargetOpcode::G_FSHR, {MI.getOperand(0).getReg()},
1562 {MI.getOperand(1).getReg(), MI.getOperand(2).getReg(),
1563 Cast64.getReg(0)});
1564 MI.eraseFromParent();
1565 }
1566 return true;
1567}
1568
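// Vector G_ICMP with the NE predicate is rewritten as an EQ compare followed
// by a NOT, which maps better onto the available NEON compare instructions.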
1569bool AArch64LegalizerInfo::legalizeICMP(MachineInstr &MI,
1572 Register DstReg = MI.getOperand(0).getReg();
1573 Register SrcReg1 = MI.getOperand(2).getReg();
1574 Register SrcReg2 = MI.getOperand(3).getReg();
1575 LLT DstTy = MRI.getType(DstReg);
1576 LLT SrcTy = MRI.getType(SrcReg1);
1577
1578
1582 return false;
1583
1584
1585
1588 return true;
1590 MIRBuilder
1593 MIRBuilder.buildNot(DstReg, CmpReg);
1594
1595 MI.eraseFromParent();
1596 return true;
1597}
1598
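// Rotates with a narrow amount type: widen the amount register to 64 bits so
// the imported register-variant rotate patterns can match.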
1599bool AArch64LegalizerInfo::legalizeRotate(MachineInstr &MI,
1602
1603
1604 Register AmtReg = MI.getOperand(2).getReg();
1605 LLT AmtTy = MRI.getType(AmtReg);
1606 (void)AmtTy;
1607 assert(AmtTy.isScalar() && "Expected a scalar rotate");
1608 assert(AmtTy.getSizeInBits() < 64 && "Expected this rotate to be legal");
1611 MI.getOperand(2).setReg(NewAmt.getReg(0));
1613 return true;
1614}
1615
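// Small code-model globals are materialized as ADRP of the page plus
// G_ADD_LOW of the low 12 bits. References that must go through the GOT (and
// thread-locals) are left as-is, and MTE-tagged globals get an extra MOVK to
// place the tag in the top bits of the address.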
1616bool AArch64LegalizerInfo::legalizeSmallCMGlobalValue(
1619 assert(MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
1620
1621
1622
1623
1624 auto &GlobalOp = MI.getOperand(1);
1625
1626 if (GlobalOp.isSymbol())
1627 return true;
1628 const auto* GV = GlobalOp.getGlobal();
1629 if (GV->isThreadLocal())
1630 return true;
1631
1632 auto &TM = ST->getTargetLowering()->getTargetMachine();
1633 unsigned OpFlags = ST->ClassifyGlobalReference(GV, TM);
1634
1636 return true;
1637
1638 auto Offset = GlobalOp.getOffset();
1639 Register DstReg = MI.getOperand(0).getReg();
1642
1643 MRI.setRegClass(ADRP.getReg(0), &AArch64::GPR64RegClass);
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1660 "Should not have folded in an offset for a tagged global!");
1662 .addGlobalAddress(GV, 0x100000000,
1665 MRI.setRegClass(ADRP.getReg(0), &AArch64::GPR64RegClass);
1666 }
1667
1668 MIRBuilder.buildInstr(AArch64::G_ADD_LOW, {DstReg}, {ADRP})
1669 .addGlobalAddress(GV, Offset,
1671 MI.eraseFromParent();
1672 return true;
1673}
1674
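// legalizeIntrinsic() maps AArch64 intrinsics onto generic (or AArch64
// pseudo-generic) opcodes, e.g. aarch64.neon.smax -> G_SMAX, and open-codes
// the few that need real expansion such as vacopy and aarch64.prefetch.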
1675bool AArch64LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
1676 MachineInstr &MI) const {
1677 MachineIRBuilder &MIB = Helper.MIRBuilder;
1678 MachineRegisterInfo &MRI = *MIB.getMRI();
1679
1680 auto LowerUnaryOp = [&MI, &MIB](unsigned Opcode) {
1681 MIB.buildInstr(Opcode, {MI.getOperand(0)}, {MI.getOperand(2)});
1682 MI.eraseFromParent();
1683 return true;
1684 };
1685 auto LowerBinOp = [&MI, &MIB](unsigned Opcode) {
1687 {MI.getOperand(2), MI.getOperand(3)});
1688 MI.eraseFromParent();
1689 return true;
1690 };
1691 auto LowerTriOp = [&MI, &MIB](unsigned Opcode) {
1693 {MI.getOperand(2), MI.getOperand(3), MI.getOperand(4)});
1694 MI.eraseFromParent();
1695 return true;
1696 };
1697
1698 Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
1699 switch (IntrinsicID) {
1700 case Intrinsic::vacopy: {
1701 unsigned PtrSize = ST->isTargetILP32() ? 4 : 8;
1702 unsigned VaListSize =
1703 (ST->isTargetDarwin() || ST->isTargetWindows())
1704 ? PtrSize
1705 : ST->isTargetILP32() ? 20 : 32;
1706
1713 VaListSize, Align(PtrSize)));
1717 VaListSize, Align(PtrSize)));
1718 MI.eraseFromParent();
1719 return true;
1720 }
1721 case Intrinsic::get_dynamic_area_offset: {
1723 MI.eraseFromParent();
1724 return true;
1725 }
1726 case Intrinsic::aarch64_mops_memset_tag: {
1727 assert(MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
1728
1729
1730 auto &Value = MI.getOperand(3);
1732 Value.setReg(ExtValueReg);
1733 return true;
1734 }
1735 case Intrinsic::aarch64_prefetch: {
1736 auto &AddrVal = MI.getOperand(1);
1737
1738 int64_t IsWrite = MI.getOperand(2).getImm();
1739 int64_t Target = MI.getOperand(3).getImm();
1740 int64_t IsStream = MI.getOperand(4).getImm();
1741 int64_t IsData = MI.getOperand(5).getImm();
1742
1743 unsigned PrfOp = (IsWrite << 4) |
1744 (!IsData << 3) |
1745 (Target << 1) |
1746 (unsigned)IsStream;
1747
1749 MI.eraseFromParent();
1750 return true;
1751 }
1752 case Intrinsic::aarch64_neon_uaddv:
1753 case Intrinsic::aarch64_neon_saddv:
1754 case Intrinsic::aarch64_neon_umaxv:
1755 case Intrinsic::aarch64_neon_smaxv:
1756 case Intrinsic::aarch64_neon_uminv:
1757 case Intrinsic::aarch64_neon_sminv: {
1758 bool IsSigned = IntrinsicID == Intrinsic::aarch64_neon_saddv ||
1759 IntrinsicID == Intrinsic::aarch64_neon_smaxv ||
1760 IntrinsicID == Intrinsic::aarch64_neon_sminv;
1761
1762 auto OldDst = MI.getOperand(0).getReg();
1763 auto OldDstTy = MRI.getType(OldDst);
1764 LLT NewDstTy = MRI.getType(MI.getOperand(2).getReg()).getElementType();
1765 if (OldDstTy == NewDstTy)
1766 return true;
1767
1768 auto NewDst = MRI.createGenericVirtualRegister(NewDstTy);
1769
1771 MI.getOperand(0).setReg(NewDst);
1773
1775 MIB.buildExtOrTrunc(IsSigned ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT,
1776 OldDst, NewDst);
1777
1778 return true;
1779 }
1780 case Intrinsic::aarch64_neon_uaddlp:
1781 case Intrinsic::aarch64_neon_saddlp: {
1782 unsigned Opc = IntrinsicID == Intrinsic::aarch64_neon_uaddlp
1783 ? AArch64::G_UADDLP
1784 : AArch64::G_SADDLP;
1786 MI.eraseFromParent();
1787
1788 return true;
1789 }
1790 case Intrinsic::aarch64_neon_uaddlv:
1791 case Intrinsic::aarch64_neon_saddlv: {
1792 unsigned Opc = IntrinsicID == Intrinsic::aarch64_neon_uaddlv
1793 ? AArch64::G_UADDLV
1794 : AArch64::G_SADDLV;
1795 Register DstReg = MI.getOperand(0).getReg();
1796 Register SrcReg = MI.getOperand(2).getReg();
1797 LLT DstTy = MRI.getType(DstReg);
1798
1799 LLT MidTy, ExtTy;
1803 } else {
1806 }
1807
1812 Register ExtReg = MIB.buildInstr(AArch64::G_EXTRACT_VECTOR_ELT, {ExtTy},
1813 {MidReg, ZeroReg})
1815
1818 else
1820
1821 MI.eraseFromParent();
1822
1823 return true;
1824 }
1825 case Intrinsic::aarch64_neon_smax:
1826 return LowerBinOp(TargetOpcode::G_SMAX);
1827 case Intrinsic::aarch64_neon_smin:
1828 return LowerBinOp(TargetOpcode::G_SMIN);
1829 case Intrinsic::aarch64_neon_umax:
1830 return LowerBinOp(TargetOpcode::G_UMAX);
1831 case Intrinsic::aarch64_neon_umin:
1832 return LowerBinOp(TargetOpcode::G_UMIN);
1833 case Intrinsic::aarch64_neon_fmax:
1834 return LowerBinOp(TargetOpcode::G_FMAXIMUM);
1835 case Intrinsic::aarch64_neon_fmin:
1836 return LowerBinOp(TargetOpcode::G_FMINIMUM);
1837 case Intrinsic::aarch64_neon_fmaxnm:
1838 return LowerBinOp(TargetOpcode::G_FMAXNUM);
1839 case Intrinsic::aarch64_neon_fminnm:
1840 return LowerBinOp(TargetOpcode::G_FMINNUM);
1841 case Intrinsic::aarch64_neon_pmull:
1842 case Intrinsic::aarch64_neon_pmull64:
1843 return LowerBinOp(AArch64::G_PMULL);
1844 case Intrinsic::aarch64_neon_smull:
1845 return LowerBinOp(AArch64::G_SMULL);
1846 case Intrinsic::aarch64_neon_umull:
1847 return LowerBinOp(AArch64::G_UMULL);
1848 case Intrinsic::aarch64_neon_sabd:
1849 return LowerBinOp(TargetOpcode::G_ABDS);
1850 case Intrinsic::aarch64_neon_uabd:
1851 return LowerBinOp(TargetOpcode::G_ABDU);
1852 case Intrinsic::aarch64_neon_uhadd:
1853 return LowerBinOp(TargetOpcode::G_UAVGFLOOR);
1854 case Intrinsic::aarch64_neon_urhadd:
1855 return LowerBinOp(TargetOpcode::G_UAVGCEIL);
1856 case Intrinsic::aarch64_neon_shadd:
1857 return LowerBinOp(TargetOpcode::G_SAVGFLOOR);
1858 case Intrinsic::aarch64_neon_srhadd:
1859 return LowerBinOp(TargetOpcode::G_SAVGCEIL);
1860 case Intrinsic::aarch64_neon_abs: {
1861
1862 MIB.buildInstr(TargetOpcode::G_ABS, {MI.getOperand(0)}, {MI.getOperand(2)});
1863 MI.eraseFromParent();
1864 return true;
1865 }
1866 case Intrinsic::aarch64_neon_sqadd: {
1867 if (MRI.getType(MI.getOperand(0).getReg()).isVector())
1868 return LowerBinOp(TargetOpcode::G_SADDSAT);
1869 break;
1870 }
1871 case Intrinsic::aarch64_neon_sqsub: {
1872 if (MRI.getType(MI.getOperand(0).getReg()).isVector())
1873 return LowerBinOp(TargetOpcode::G_SSUBSAT);
1874 break;
1875 }
1876 case Intrinsic::aarch64_neon_uqadd: {
1877 if (MRI.getType(MI.getOperand(0).getReg()).isVector())
1878 return LowerBinOp(TargetOpcode::G_UADDSAT);
1879 break;
1880 }
1881 case Intrinsic::aarch64_neon_uqsub: {
1882 if (MRI.getType(MI.getOperand(0).getReg()).isVector())
1883 return LowerBinOp(TargetOpcode::G_USUBSAT);
1884 break;
1885 }
1886 case Intrinsic::aarch64_neon_udot:
1887 return LowerTriOp(AArch64::G_UDOT);
1888 case Intrinsic::aarch64_neon_sdot:
1889 return LowerTriOp(AArch64::G_SDOT);
1890 case Intrinsic::aarch64_neon_usdot:
1891 return LowerTriOp(AArch64::G_USDOT);
1892 case Intrinsic::aarch64_neon_sqxtn:
1893 return LowerUnaryOp(TargetOpcode::G_TRUNC_SSAT_S);
1894 case Intrinsic::aarch64_neon_sqxtun:
1895 return LowerUnaryOp(TargetOpcode::G_TRUNC_SSAT_U);
1896 case Intrinsic::aarch64_neon_uqxtn:
1897 return LowerUnaryOp(TargetOpcode::G_TRUNC_USAT_U);
1898
1899 case Intrinsic::vector_reverse:
1900
1901 return false;
1902 }
1903
1904 return true;
1905}
1906
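// Shifts whose amount is a constant below 32 get the amount rebuilt as a
// 64-bit constant so the imported immediate-shift patterns can select it.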
1907bool AArch64LegalizerInfo::legalizeShlAshrLshr(
1910 assert(MI.getOpcode() == TargetOpcode::G_ASHR ||
1911 MI.getOpcode() == TargetOpcode::G_LSHR ||
1912 MI.getOpcode() == TargetOpcode::G_SHL);
1913
1914
1915 Register AmtReg = MI.getOperand(2).getReg();
1917 if (!VRegAndVal)
1918 return true;
1919
1920 int64_t Amount = VRegAndVal->Value.getSExtValue();
1921 if (Amount > 31)
1922 return true;
1925 MI.getOperand(2).setReg(ExtCst.getReg(0));
1927 return true;
1928}
1929
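// Fold a G_PTR_ADD of a suitable constant into the base/offset pair used by
// the paired load/store (LDP/STP) addressing mode below.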
1930static void matchLDPSTPAddrMode(Register Root, Register &Base, int &Offset,
1931 MachineRegisterInfo &MRI) {
1932 Base = Root;
1933 Offset = 0;
1934
1935 Register NewBase;
1936 int64_t NewOffset;
1937 if (mi_match(Root, MRI, m_GPtrAdd(m_Reg(NewBase), m_ICst(NewOffset))) &&
1938 isShiftedInt<7, 3>(NewOffset)) {
1939 Base = NewBase;
1940 Offset = NewOffset;
1941 }
1942}
1943
1944
1945
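// 128-bit atomic loads and stores are selected here to LDP/STP (single-copy
// atomic with +lse2) or to LDIAPP/STILP when RCPC3 provides acquire/release
// forms; pointer-vector loads and stores are rewritten as integer-vector
// accesses plus a bitcast.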
1946bool AArch64LegalizerInfo::legalizeLoadStore(
1949 assert(MI.getOpcode() == TargetOpcode::G_STORE ||
1950 MI.getOpcode() == TargetOpcode::G_LOAD);
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960 Register ValReg = MI.getOperand(0).getReg();
1961 const LLT ValTy = MRI.getType(ValReg);
1962
1964
1966 bool IsLoad = MI.getOpcode() == TargetOpcode::G_LOAD;
1969 bool IsRcpC3 =
1970 ST->hasLSE2() && ST->hasRCPC3() && (IsLoadAcquire || IsStoreRelease);
1971
1973
1974 unsigned Opcode;
1975 if (IsRcpC3) {
1976 Opcode = IsLoad ? AArch64::LDIAPPX : AArch64::STILPX;
1977 } else {
1978
1979
1982 assert(ST->hasLSE2() && "ldp/stp not single copy atomic without +lse2");
1983
1984 Opcode = IsLoad ? AArch64::LDPXi : AArch64::STPXi;
1985 }
1986
1987 MachineInstrBuilder NewI;
1988 if (IsLoad) {
1989 NewI = MIRBuilder.buildInstr(Opcode, {s64, s64}, {});
1992 } else {
1995 Opcode, {}, {Split->getOperand(0), Split->getOperand(1)});
1996 }
1997
1998 if (IsRcpC3) {
1999 NewI.addUse(MI.getOperand(1).getReg());
2000 } else {
2006 }
2007
2010 *MRI.getTargetRegisterInfo(),
2011 *ST->getRegBankInfo());
2012 MI.eraseFromParent();
2013 return true;
2014 }
2015
2018 LLVM_DEBUG(dbgs() << "Tried to do custom legalization on wrong load/store");
2019 return false;
2020 }
2021
2024 auto &MMO = **MI.memoperands_begin();
2025 MMO.setType(NewTy);
2026
2027 if (MI.getOpcode() == TargetOpcode::G_STORE) {
2030 } else {
2031 auto NewLoad = MIRBuilder.buildLoad(NewTy, MI.getOperand(1), MMO);
2033 }
2034 MI.eraseFromParent();
2035 return true;
2036}
2037
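// G_VAARG on the AAPCS va_list: load the current argument pointer, align it
// up if the requested alignment exceeds the slot alignment, load the value,
// then advance the pointer and store it back to the list.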
2038bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI,
2041 MachineFunction &MF = MIRBuilder.getMF();
2042 Align Alignment(MI.getOperand(2).getImm());
2043 Register Dst = MI.getOperand(0).getReg();
2044 Register ListPtr = MI.getOperand(1).getReg();
2045
2046 LLT PtrTy = MRI.getType(ListPtr);
2048
2049 const unsigned PtrSize = PtrTy.getSizeInBits() / 8;
2050 const Align PtrAlign = Align(PtrSize);
2052 PtrTy, ListPtr,
2054 PtrTy, PtrAlign));
2055
2056 MachineInstrBuilder DstPtr;
2057 if (Alignment > PtrAlign) {
2058
2059 auto AlignMinus1 =
2060 MIRBuilder.buildConstant(IntPtrTy, Alignment.value() - 1);
2061 auto ListTmp = MIRBuilder.buildPtrAdd(PtrTy, List, AlignMinus1.getReg(0));
2063 } else
2064 DstPtr = List;
2065
2066 LLT ValTy = MRI.getType(Dst);
2069 Dst, DstPtr,
2071 ValTy, std::max(Alignment, PtrAlign)));
2072
2074
2075 auto NewList = MIRBuilder.buildPtrAdd(PtrTy, DstPtr, Size.getReg(0));
2076
2077 MIRBuilder.buildStore(NewList, ListPtr,
2080 PtrTy, PtrAlign));
2081
2082 MI.eraseFromParent();
2083 return true;
2084}
2085
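// G_SBFX/G_UBFX are only kept when both the lsb and width operands are
// constants, i.e. when the immediate bitfield-extract forms can be selected.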
2086bool AArch64LegalizerInfo::legalizeBitfieldExtract(
2088
2089
2092}
2093
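// G_CTPOP: with CSSC, scalar counts use CNT directly (128-bit values are
// split into two 64-bit counts); otherwise the bytes are counted with the
// NEON CNT instruction and combined with UDOT or a chain of pairwise adds
// (UADDLP/UADDLV).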
2094bool AArch64LegalizerInfo::legalizeCTPOP(MachineInstr &MI,
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117 MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
2118 Register Dst = MI.getOperand(0).getReg();
2119 Register Val = MI.getOperand(1).getReg();
2120 LLT Ty = MRI.getType(Val);
2122
2123 assert(Ty == MRI.getType(Dst) &&
2124 "Expected src and dst to have the same type!");
2125
2126 if (ST->hasCSSC() && Ty.isScalar() && Size == 128) {
2128
2130 auto CTPOP1 = MIRBuilder.buildCTPOP(s64, Split->getOperand(0));
2131 auto CTPOP2 = MIRBuilder.buildCTPOP(s64, Split->getOperand(1));
2132 auto Add = MIRBuilder.buildAdd(s64, CTPOP1, CTPOP2);
2133
2135 MI.eraseFromParent();
2136 return true;
2137 }
2138
2139 if (!ST->hasNEON() ||
2140 MI.getMF()->getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) {
2141
2145 }
2146
2147
2148
2149
2152 assert((Size == 32 || Size == 64 || Size == 128) && "Expected only 32, 64, or 128 bit scalars!");
2153 if (Size == 32) {
2154 Val = MIRBuilder.buildZExt(LLT::scalar(64), Val).getReg(0);
2155 }
2156 }
2158
2159
2161
2162
2163
2166 LLT Dt = Ty == LLT::fixed_vector(2, 64) ? LLT::fixed_vector(4, 32) : Ty;
2167 auto Zeros = MIRBuilder.buildConstant(Dt, 0);
2168 auto Ones = MIRBuilder.buildConstant(VTy, 1);
2169 MachineInstrBuilder Sum;
2170
2171 if (Ty == LLT::fixed_vector(2, 64)) {
2172 auto UDOT =
2173 MIRBuilder.buildInstr(AArch64::G_UDOT, {Dt}, {Zeros, Ones, CTPOP});
2174 Sum = MIRBuilder.buildInstr(AArch64::G_UADDLP, {Ty}, {UDOT});
2176 Sum = MIRBuilder.buildInstr(AArch64::G_UDOT, {Dt}, {Zeros, Ones, CTPOP});
2178 Sum = MIRBuilder.buildInstr(AArch64::G_UDOT, {Dt}, {Zeros, Ones, CTPOP});
2179 } else {
2181 }
2182
2184 MI.eraseFromParent();
2185 return true;
2186 }
2187
2189 unsigned Opc;
2192 Opc = Intrinsic::aarch64_neon_uaddlv;
2193 HAddTys.push_back(LLT::scalar(32));
2195 Opc = Intrinsic::aarch64_neon_uaddlp;
2198 Opc = Intrinsic::aarch64_neon_uaddlp;
2202 Opc = Intrinsic::aarch64_neon_uaddlp;
2207 Opc = Intrinsic::aarch64_neon_uaddlp;
2210 Opc = Intrinsic::aarch64_neon_uaddlp;
2213 } else
2216 for (LLT HTy : HAddTys) {
2218 HSum = UADD.getReg(0);
2219 }
2220
2221
2223 MIRBuilder.buildZExt(Dst, UADD);
2224 else
2226 MI.eraseFromParent();
2227 return true;
2228}
2229
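// 128-bit G_ATOMIC_CMPXCHG: with LSE this becomes a CASP register-pair
// compare-and-swap (variant chosen from the memory ordering); without LSE it
// is emitted as a CMP_SWAP_128 pseudo that later expands to an LDXP/STXP
// loop.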
2230bool AArch64LegalizerInfo::legalizeAtomicCmpxchg128(
2232 MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
2234 auto Addr = MI.getOperand(1).getReg();
2235 auto DesiredI = MIRBuilder.buildUnmerge({s64, s64}, MI.getOperand(2));
2236 auto NewI = MIRBuilder.buildUnmerge({s64, s64}, MI.getOperand(3));
2237 auto DstLo = MRI.createGenericVirtualRegister(s64);
2238 auto DstHi = MRI.createGenericVirtualRegister(s64);
2239
2240 MachineInstrBuilder CAS;
2241 if (ST->hasLSE()) {
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251 auto Ordering = (*MI.memoperands_begin())->getMergedOrdering();
2252 unsigned Opcode;
2253 switch (Ordering) {
2255 Opcode = AArch64::CASPAX;
2256 break;
2258 Opcode = AArch64::CASPLX;
2259 break;
2262 Opcode = AArch64::CASPALX;
2263 break;
2264 default:
2265 Opcode = AArch64::CASPX;
2266 break;
2267 }
2268
2270 auto CASDst = MRI.createGenericVirtualRegister(s128);
2271 auto CASDesired = MRI.createGenericVirtualRegister(s128);
2272 auto CASNew = MRI.createGenericVirtualRegister(s128);
2273 MIRBuilder.buildInstr(TargetOpcode::REG_SEQUENCE, {CASDesired}, {})
2274 .addUse(DesiredI->getOperand(0).getReg())
2275 .addImm(AArch64::sube64)
2276 .addUse(DesiredI->getOperand(1).getReg())
2277 .addImm(AArch64::subo64);
2278 MIRBuilder.buildInstr(TargetOpcode::REG_SEQUENCE, {CASNew}, {})
2280 .addImm(AArch64::sube64)
2282 .addImm(AArch64::subo64);
2283
2284 CAS = MIRBuilder.buildInstr(Opcode, {CASDst}, {CASDesired, CASNew, Addr});
2285
2286 MIRBuilder.buildExtract({DstLo}, {CASDst}, 0);
2287 MIRBuilder.buildExtract({DstHi}, {CASDst}, 64);
2288 } else {
2289
2290
2291
2292 auto Ordering = (*MI.memoperands_begin())->getMergedOrdering();
2293 unsigned Opcode;
2294 switch (Ordering) {
2296 Opcode = AArch64::CMP_SWAP_128_ACQUIRE;
2297 break;
2299 Opcode = AArch64::CMP_SWAP_128_RELEASE;
2300 break;
2303 Opcode = AArch64::CMP_SWAP_128;
2304 break;
2305 default:
2306 Opcode = AArch64::CMP_SWAP_128_MONOTONIC;
2307 break;
2308 }
2309
2310 auto Scratch = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
2311 CAS = MIRBuilder.buildInstr(Opcode, {DstLo, DstHi, Scratch},
2312 {Addr, DesiredI->getOperand(0),
2313 DesiredI->getOperand(1), NewI->getOperand(0),
2315 }
2316
2319 *MRI.getTargetRegisterInfo(),
2320 *ST->getRegBankInfo());
2321
2323 MI.eraseFromParent();
2324 return true;
2325}
2326
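// G_CTTZ is implemented as CTLZ(BITREVERSE(x)), i.e. the RBIT + CLZ pair.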
2327bool AArch64LegalizerInfo::legalizeCTTZ(MachineInstr &MI,
2329 MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
2330 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
2331 LLT Ty = MRI.getType(MI.getOperand(1).getReg());
2332 auto BitReverse = MIRBuilder.buildBitReverse(Ty, MI.getOperand(1));
2333 MIRBuilder.buildCTLZ(MI.getOperand(0).getReg(), BitReverse);
2334 MI.eraseFromParent();
2335 return true;
2336}
2337
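// For a MOPS G_MEMSET the value operand must sit in a 64-bit register, so the
// s8 value is any-extended before selection; only the low 8 bits are read.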
2338bool AArch64LegalizerInfo::legalizeMemOps(MachineInstr &MI,
2340 MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
2341
2342
2343 if (MI.getOpcode() == TargetOpcode::G_MEMSET) {
2344
2345
2346 auto &Value = MI.getOperand(1);
2349 Value.setReg(ExtValueReg);
2350 return true;
2351 }
2352
2353 return false;
2354}
2355
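// G_EXTRACT_VECTOR_ELT stays as-is for constant indices (and for scalable
// vectors); a variable index into a fixed vector is lowered through a stack
// temporary.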
2356bool AArch64LegalizerInfo::legalizeExtractVectorElt(
2359 auto VRegAndVal =
2361 if (VRegAndVal)
2362 return true;
2365 return true;
2368}
2369
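// G_DYN_STACKALLOC: without inline stack probing the generic lowering is
// used; with probing, the new SP is computed, PROBED_STACKALLOC_DYN probes
// the allocated region, and the probed SP is copied to the result.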
2370bool AArch64LegalizerInfo::legalizeDynStackAlloc(
2372 MachineFunction &MF = *MI.getParent()->getParent();
2373 MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
2374 MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
2375
2376
2377
2380 "inline-asm") {
2382 return true;
2383 }
2384
2385 Register Dst = MI.getOperand(0).getReg();
2386 Register AllocSize = MI.getOperand(1).getReg();
2388
2390 "Unexpected type for dynamic alloca");
2392 "Unexpected type for dynamic alloca");
2393
2394 LLT PtrTy = MRI.getType(Dst);
2399 auto NewMI =
2400 MIRBuilder.buildInstr(AArch64::PROBED_STACKALLOC_DYN, {}, {SPTmp});
2401 MRI.setRegClass(NewMI.getReg(0), &AArch64::GPR64commonRegClass);
2402 MIRBuilder.setInsertPt(*NewMI->getParent(), NewMI);
2403 MIRBuilder.buildCopy(Dst, SPTmp);
2404
2405 MI.eraseFromParent();
2406 return true;
2407}
2408
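// Pack the generic G_PREFETCH operands (is-write, locality, is-data) into the
// single PRFM immediate; the locality value is inverted because PRFM counts
// the target cache level up from L1 while LLVM counts temporal locality up.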
2409bool AArch64LegalizerInfo::legalizePrefetch(MachineInstr &MI,
2411 MachineIRBuilder &MIB = Helper.MIRBuilder;
2412 auto &AddrVal = MI.getOperand(0);
2413
2414 int64_t IsWrite = MI.getOperand(1).getImm();
2415 int64_t Locality = MI.getOperand(2).getImm();
2416 int64_t IsData = MI.getOperand(3).getImm();
2417
2418 bool IsStream = Locality == 0;
2419 if (Locality != 0) {
2420 assert(Locality <= 3 && "Prefetch locality out-of-range");
2421
2422
2423
2424 Locality = 3 - Locality;
2425 }
2426
2427 unsigned PrfOp = (IsWrite << 4) | (!IsData << 3) | (Locality << 1) | IsStream;
2428
2430 MI.eraseFromParent();
2431 return true;
2432}
2433
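// Truncating f64 vectors straight to f16 would round twice, so the elements
// are first truncated to f32 with round-to-odd (G_FPTRUNC_ODD) on v2s64
// pieces, then truncated to f16 and re-assembled into the destination.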
2434bool AArch64LegalizerInfo::legalizeFptrunc(MachineInstr &MI,
2437 auto [Dst, DstTy, Src, SrcTy] = MI.getFirst2RegLLTs();
2439 "Expected a power of 2 elements");
2440
2449
2453
2455
2456
2457 int StepSize = ElemCount % 4 ? 2 : 4;
2458
2459
2460
2461 if (ElemCount <= 2)
2463 else {
2464 for (unsigned i = 0; i < ElemCount / 2; ++i)
2465 RegsToUnmergeTo.push_back(MRI.createGenericVirtualRegister(v2s64));
2466
2467 MIRBuilder.buildUnmerge(RegsToUnmergeTo, Src);
2468 }
2469
2470
2471 for (auto SrcReg : RegsToUnmergeTo) {
2473 MIRBuilder.buildInstr(AArch64::G_FPTRUNC_ODD, {v2s32}, {SrcReg})
2476 }
2477
2478
2479
2480 unsigned Index = 0;
2481 for (unsigned LoopIter = 0; LoopIter < ElemCount / StepSize; ++LoopIter) {
2482 if (StepSize == 4) {
2484 MIRBuilder
2486 {v4s32}, {TruncOddDstRegs[Index++], TruncOddDstRegs[Index++]})
2488
2491 } else {
2493 MIRBuilder.buildFPTrunc(v2s16, TruncOddDstRegs[Index++]).getReg(0));
2494 }
2495 }
2496
2497
2498 if (RegsToMerge.size() == 1) {
2500 MI.eraseFromParent();
2501 return true;
2502 }
2503
2504
2506 MRI.replaceRegWith(Dst, Fin);
2507 MI.eraseFromParent();
2508 return true;
2509}
unsigned const MachineRegisterInfo * MRI
static void matchLDPSTPAddrMode(Register Root, Register &Base, int &Offset, MachineRegisterInfo &MRI)
Definition AArch64LegalizerInfo.cpp:1930
This file declares the targeting of the Machinelegalizer class for AArch64.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static Error unsupported(const char *Str, const Triple &T)
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
Interface for Targets to specify which operations they can successfully select and how the others sho...
Contains matchers for matching SSA Machine Instructions.
This file declares the MachineIRBuilder class.
Promote Memory to Register
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static constexpr MCPhysReg SPReg
bool legalizeCustom(LegalizerHelper &Helper, MachineInstr &MI, LostDebugLocObserver &LocObserver) const override
Called for instructions with the Custom LegalizationAction.
Definition AArch64LegalizerInfo.cpp:1440
bool legalizeIntrinsic(LegalizerHelper &Helper, MachineInstr &MI) const override
Definition AArch64LegalizerInfo.cpp:1675
AArch64LegalizerInfo(const AArch64Subtarget &ST)
Definition AArch64LegalizerInfo.cpp:42
Class for arbitrary precision integers.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
int64_t getSExtValue() const
Get sign extended value.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Abstract class that contains various methods for clients to notify about changes.
virtual void changingInstr(MachineInstr &MI)=0
This instruction is about to be mutated in some way.
virtual void changedInstr(MachineInstr &MI)=0
This instruction was mutated in some way.
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
constexpr unsigned getScalarSizeInBits() const
constexpr bool isScalar() const
static constexpr LLT scalable_vector(unsigned MinNumElements, unsigned ScalarSizeInBits)
Get a low-level scalable vector of some number of elements and element width.
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
constexpr bool isPointerVector() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
constexpr ElementCount getElementCount() const
constexpr LLT changeElementSize(unsigned NewEltSize) const
If this type is a vector, return a vector with the same number of elements but the new element size.
constexpr unsigned getAddressSpace() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
constexpr LLT changeElementCount(ElementCount EC) const
Return a vector or scalar with the same element type and the new element count.
LLVM_ABI void computeTables()
Compute any ancillary tables needed to quickly decide how an operation should be handled.
LegalizeRuleSet & minScalar(unsigned TypeIdx, const LLT Ty)
Ensure the scalar is at least as wide as Ty.
LegalizeRuleSet & widenScalarOrEltToNextPow2OrMinSize(unsigned TypeIdx, unsigned MinSize=0)
Widen the scalar or vector element type to the next power of two that is at least MinSize.
LegalizeRuleSet & legalFor(std::initializer_list< LLT > Types)
The instruction is legal when type index 0 is any type in the given list.
LegalizeRuleSet & maxScalarEltSameAsIf(LegalityPredicate Predicate, unsigned TypeIdx, unsigned SmallTypeIdx)
Conditionally narrow the scalar or elt to match the size of another.
LegalizeRuleSet & unsupported()
The instruction is unsupported.
LegalizeRuleSet & scalarSameSizeAs(unsigned TypeIdx, unsigned SameSizeIdx)
Change the type TypeIdx to have the same scalar size as type SameSizeIdx.
LegalizeRuleSet & bitcastIf(LegalityPredicate Predicate, LegalizeMutation Mutation)
The specified type index is coerced if predicate is true.
LegalizeRuleSet & libcallFor(std::initializer_list< LLT > Types)
LegalizeRuleSet & maxScalar(unsigned TypeIdx, const LLT Ty)
Ensure the scalar is at most as wide as Ty.
LegalizeRuleSet & minScalarOrElt(unsigned TypeIdx, const LLT Ty)
Ensure the scalar or element is at least as wide as Ty.
LegalizeRuleSet & clampMaxNumElements(unsigned TypeIdx, const LLT EltTy, unsigned MaxElements)
Limit the number of elements in EltTy vectors to at most MaxElements.
LegalizeRuleSet & clampMinNumElements(unsigned TypeIdx, const LLT EltTy, unsigned MinElements)
Limit the number of elements in EltTy vectors to at least MinElements.
LegalizeRuleSet & widenVectorEltsToVectorMinSize(unsigned TypeIdx, unsigned VectorSize)
Ensure the vector size is at least as wide as VectorSize by promoting the element.
LegalizeRuleSet & lowerIfMemSizeNotPow2()
Lower a memory operation if the memory size, rounded to bytes, is not a power of 2.
LegalizeRuleSet & minScalarEltSameAsIf(LegalityPredicate Predicate, unsigned TypeIdx, unsigned LargeTypeIdx)
Conditionally widen the scalar or elt to match the size of another.
LegalizeRuleSet & customForCartesianProduct(std::initializer_list< LLT > Types)
LegalizeRuleSet & lowerIfMemSizeNotByteSizePow2()
Lower a memory operation if the memory access size is not a round power of 2 byte size.
LegalizeRuleSet & moreElementsToNextPow2(unsigned TypeIdx)
Add more elements to the vector to reach the next power of two.
LegalizeRuleSet & narrowScalarIf(LegalityPredicate Predicate, LegalizeMutation Mutation)
Narrow the scalar to the one selected by the mutation if the predicate is true.
LegalizeRuleSet & lower()
The instruction is lowered.
LegalizeRuleSet & moreElementsIf(LegalityPredicate Predicate, LegalizeMutation Mutation)
Add more elements to reach the type selected by the mutation if the predicate is true.
LegalizeRuleSet & lowerFor(std::initializer_list< LLT > Types)
The instruction is lowered when type index 0 is any type in the given list.
LegalizeRuleSet & scalarizeIf(LegalityPredicate Predicate, unsigned TypeIdx)
LegalizeRuleSet & lowerIf(LegalityPredicate Predicate)
The instruction is lowered if predicate is true.
LegalizeRuleSet & clampScalar(unsigned TypeIdx, const LLT MinTy, const LLT MaxTy)
Limit the range of scalar sizes to MinTy and MaxTy.
LegalizeRuleSet & custom()
Unconditionally custom lower.
LegalizeRuleSet & minScalarSameAs(unsigned TypeIdx, unsigned LargeTypeIdx)
Widen the scalar to match the size of another.
LegalizeRuleSet & unsupportedIf(LegalityPredicate Predicate)
LegalizeRuleSet & minScalarOrEltIf(LegalityPredicate Predicate, unsigned TypeIdx, const LLT Ty)
Ensure the scalar or element is at least as wide as Ty.
LegalizeRuleSet & widenScalarIf(LegalityPredicate Predicate, LegalizeMutation Mutation)
Widen the scalar to the one selected by the mutation if the predicate is true.
LegalizeRuleSet & alwaysLegal()
LegalizeRuleSet & clampNumElements(unsigned TypeIdx, const LLT MinTy, const LLT MaxTy)
Limit the number of elements for the given vectors to at least MinTy's number of elements and at most...
LegalizeRuleSet & maxScalarIf(LegalityPredicate Predicate, unsigned TypeIdx, const LLT Ty)
Conditionally limit the maximum size of the scalar.
LegalizeRuleSet & customIf(LegalityPredicate Predicate)
LegalizeRuleSet & widenScalarToNextPow2(unsigned TypeIdx, unsigned MinSize=0)
Widen the scalar to the next power of two that is at least MinSize.
LegalizeRuleSet & scalarize(unsigned TypeIdx)
LegalizeRuleSet & legalForCartesianProduct(std::initializer_list< LLT > Types)
The instruction is legal when type indexes 0 and 1 are both in the given list.
LegalizeRuleSet & legalForTypesWithMemDesc(std::initializer_list< LegalityPredicates::TypePairAndMemDesc > TypesAndMemDesc)
The instruction is legal when type indexes 0 and 1 along with the memory size and minimum alignment i...
unsigned immIdx(unsigned ImmIdx)
LegalizeRuleSet & widenScalarOrEltToNextPow2(unsigned TypeIdx, unsigned MinSize=0)
Widen the scalar or vector element type to the next power of two that is at least MinSize.
LegalizeRuleSet & legalIf(LegalityPredicate Predicate)
The instruction is legal if predicate is true.
LLVM_ABI LegalizeResult lowerDynStackAlloc(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerBitCount(MachineInstr &MI)
LLVM_ABI LegalizeResult lowerExtractInsertVectorElt(MachineInstr &MI)
Lower a vector extract or insert by writing the vector to a stack temporary and reloading the element...
LLVM_ABI LegalizeResult lowerAbsToCNeg(MachineInstr &MI)
const TargetLowering & getTargetLowering() const
LLVM_ABI LegalizeResult lowerFunnelShiftAsShifts(MachineInstr &MI)
LLVM_ABI MachineInstrBuilder createStackStoreLoad(const DstOp &Res, const SrcOp &Val)
Create a store of Val to a stack temporary and return a load as the same type as Res.
@ Legalized
Instruction has been legalized and the MachineFunction changed.
@ UnableToLegalize
Some kind of error has occurred and we could not legalize this instruction.
GISelChangeObserver & Observer
To keep track of changes made by the LegalizerHelper.
LLVM_ABI Register getDynStackAllocTargetPtr(Register SPReg, Register AllocSize, Align Alignment, LLT PtrTy)
MachineIRBuilder & MIRBuilder
Expose MIRBuilder so clients can set their own RecordInsertInstruction functions.
LegalizeRuleSet & getActionDefinitionsBuilder(unsigned Opcode)
Get the action definition builder for the given opcode.
const LegacyLegalizerInfo & getLegacyLegalizerInfo() const
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
void setInsertPt(MachineBasicBlock &MBB, MachineBasicBlock::iterator II)
Set the insertion point before the specified position.
MachineInstrBuilder buildAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ADD Op0, Op1.
MachineInstrBuilder buildNot(const DstOp &Dst, const SrcOp &Src0)
Build and insert a bitwise not, NegOne = G_CONSTANT -1 Res = G_OR Op0, NegOne.
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ZEXT Op.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MachineInstrBuilder buildCTLZ(const DstOp &Dst, const SrcOp &Src0)
Build and insert Res = G_CTLZ Op0, Src0.
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VEC...
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
MachineInstrBuilder buildBitReverse(const DstOp &Dst, const SrcOp &Src)
Build and insert Dst = G_BITREVERSE Src.
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
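Combined, getMachineMemOperand and the buildLoad/buildPtrAdd/buildStore helpers above are enough to copy a value through memory. A hedged sketch follows; SrcPtr and DstPtr are placeholder p0 registers, and the 64-bit type and 8-byte alignment are assumptions for illustration.
  // Hypothetical fragment, with MIRBuilder in scope (e.g. inside a
  // LegalizerHelper expansion).
  const LLT S64 = LLT::scalar(64);
  const LLT P0 = LLT::pointer(0, 64);
  MachineFunction &MF = MIRBuilder.getMF();
  MachineMemOperand *LoadMMO = MF.getMachineMemOperand(
      MachinePointerInfo(), MachineMemOperand::MOLoad, S64, Align(8));
  MachineMemOperand *StoreMMO = MF.getMachineMemOperand(
      MachinePointerInfo(), MachineMemOperand::MOStore, S64, Align(8));
  auto Val  = MIRBuilder.buildLoad(S64, SrcPtr, *LoadMMO);   // load 8 bytes
  auto Off  = MIRBuilder.buildConstant(S64, 8);
  auto Addr = MIRBuilder.buildPtrAdd(P0, DstPtr, Off);       // DstPtr + 8
  MIRBuilder.buildStore(Val, Addr, *StoreMMO);               // store 8 bytes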
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert an instruction with the given Opcode and no operands; operands can then be appended through the returned builder.
MachineInstrBuilder buildCTPOP(const DstOp &Dst, const SrcOp &Src0)
Build and insert Res = G_CTPOP Op0, Src0.
MachineFunction & getMF()
Getter for the function we currently build.
MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res, const SrcOp &Op)
Build and insert Res = ExtOpc, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of...
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_TRUNC Op.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op0.
MachineInstrBuilder buildBitcast(const DstOp &Dst, const SrcOp &Src)
Build and insert Dst = G_BITCAST Src.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPTRUNC Op.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
MachineInstrBuilder buildMaskLowPtrBits(const DstOp &Res, const SrcOp &Op0, uint32_t NumBits)
Build and insert Res = G_PTRMASK Op0, G_CONSTANT (1 << NumBits) - 1.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
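As a combined illustration of several of the builders above, a hedged sketch that widens a narrow addition through G_ZEXT and truncates the result back; Dst, A and B are placeholder registers, and the 64-bit working type is an assumption.
  // Hypothetical fragment: perform a narrow add in 64 bits.
  const LLT S64 = LLT::scalar(64);
  auto WideA   = MIRBuilder.buildZExt(S64, A);
  auto WideB   = MIRBuilder.buildZExt(S64, B);
  auto WideSum = MIRBuilder.buildAdd(S64, WideA, WideB);
  MIRBuilder.buildTrunc(Dst, WideSum);   // narrow back to Dst's original type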
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
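When no typed helper fits, an instruction can be assembled operand by operand from buildInstr plus the addUse/addImm family above (addDef, the def-operand counterpart of addUse, is assumed here); DstReg and SrcReg are placeholder registers.
  // Hypothetical fragment: %DstReg = G_EXTRACT %SrcReg, 0, built by hand.
  MIRBuilder.buildInstr(TargetOpcode::G_EXTRACT)
      .addDef(DstReg)   // result register
      .addUse(SrcReg)   // source register
      .addImm(0);       // bit offset to extract from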
Representation of each machine instruction.
const MachineOperand & getOperand(unsigned i) const
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
LLVM_ABI void setReg(Register Reg)
Change the register this operand corresponds to.
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
Wrapper class representing virtual and physical registers.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
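A hedged sketch of how the MachineInstr, MachineOperand, MachineRegisterInfo and SmallVector pieces above fit together; MI is assumed to be some instruction of interest and MIRBuilder to be in scope.
  // Hypothetical fragment: collect the register operands of MI and create a
  // fresh 64-bit generic virtual register.
  SmallVector<Register, 4> Regs;
  for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
    const MachineOperand &MO = MI.getOperand(I);
    if (MO.isReg())
      Regs.push_back(MO.getReg());
  }
  MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo();
  Register Tmp = MRI.createGenericVirtualRegister(LLT::scalar(64));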
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.stacksave/llvm.stackrestore should save...
Primary interface to the complete machine description for the target machine.
CodeModel::Model getCodeModel() const
Returns the code model.
Target - Wrapper for Target specific information.
LLVM Value Representation.
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ MO_NC
MO_NC - Indicates whether the linker is expected to check the symbol reference for overflow.
@ MO_PAGEOFF
MO_PAGEOFF - A symbol operand with this flag represents the offset of that symbol within a 4K page.
@ MO_GOT
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the sy...
@ MO_PREL
MO_PREL - Indicates that the bits of the symbol operand represented by MO_G0 etc are PC relative.
@ MO_PAGE
MO_PAGE - A symbol operand with this flag represents the pc-relative offset of the 4K page containing...
@ MO_TAGGED
MO_TAGGED - With MO_PAGE, indicates that the page includes a memory tag in bits 56-63.
@ MO_G3
MO_G3 - A symbol operand with this flag (granule 3) represents the high 16-bits of a 64-bit address,...
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
LLVM_ABI LegalityPredicate scalarOrEltWiderThan(unsigned TypeIdx, unsigned Size)
True iff the specified type index is a scalar or a vector with an element type that's wider than the ...
LLVM_ABI LegalityPredicate isPointerVector(unsigned TypeIdx)
True iff the specified type index is a vector of pointers (with any address space).
LLVM_ABI LegalityPredicate typeInSet(unsigned TypeIdx, std::initializer_list< LLT > TypesInit)
True iff the given type index is one of the specified types.
LLVM_ABI LegalityPredicate smallerThan(unsigned TypeIdx0, unsigned TypeIdx1)
True iff the first type index has a smaller total bit size than the second type index.
LLVM_ABI LegalityPredicate atomicOrderingAtLeastOrStrongerThan(unsigned MMOIdx, AtomicOrdering Ordering)
True iff the specified MMO index has an atomic ordering of Ordering or stronger.
Predicate any(Predicate P0, Predicate P1)
True iff P0 or P1 are true.
LLVM_ABI LegalityPredicate isVector(unsigned TypeIdx)
True iff the specified type index is a vector.
Predicate all(Predicate P0, Predicate P1)
True iff P0 and P1 are true.
LLVM_ABI LegalityPredicate typeIs(unsigned TypeIdx, LLT TypesInit)
True iff the given type index is the specified type.
LLVM_ABI LegalityPredicate scalarWiderThan(unsigned TypeIdx, unsigned Size)
True iff the specified type index is a scalar that's wider than the given size.
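The predicate helpers above compose into a single LegalityPredicate that a rule set consumes through legalIf, lowerIf and similar actions. A minimal sketch, assuming the LegalityPredicates namespace is brought into scope as it is in this file:
  // Hypothetical example: true when type index 0 is a vector and type
  // index 1 is a scalar wider than 32 bits.
  using namespace LegalityPredicates;
  LegalityPredicate Pred = all(isVector(0), scalarWiderThan(1, 32));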
@ Bitcast
Perform the operation on a different, but equivalently sized type.
LLVM_ABI LegalizeMutation moreElementsToNextPow2(unsigned TypeIdx, unsigned Min=0)
Add more elements to the type for the given type index to the next power of 2.
LLVM_ABI LegalizeMutation scalarize(unsigned TypeIdx)
Break up the vector type for the given type index into the element type.
LLVM_ABI LegalizeMutation changeElementTo(unsigned TypeIdx, unsigned FromTypeIdx)
Keep the same scalar or element type as the given type index.
LLVM_ABI LegalizeMutation widenScalarOrEltToNextPow2(unsigned TypeIdx, unsigned Min=0)
Widen the scalar type or vector element type for the given type index to the next power of 2.
LLVM_ABI LegalizeMutation changeTo(unsigned TypeIdx, LLT Ty)
Select this specific type for the given type index.
LLVM_ABI LegalizeMutation changeElementSizeTo(unsigned TypeIdx, unsigned FromTypeIdx)
Change the scalar size or element size to have the same scalar size as type index FromTypeIdx.
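Mutations pair with predicates to say both when a rule fires and what type it rewrites to. A hedged sketch with an illustrative opcode and illustrative type bounds; scalarNarrowerThan is the counterpart of scalarWiderThan above and is assumed to be available.
  using namespace LegalityPredicates;
  using namespace LegalizeMutations;
  getActionDefinitionsBuilder(TargetOpcode::G_OR)
      .legalFor({LLT::scalar(32), LLT::scalar(64)})
      // widen any scalar narrower than 32 bits up to exactly s32
      .widenScalarIf(scalarNarrowerThan(0, 32), changeTo(0, LLT::scalar(32)))
      // pad short vectors out to a power-of-two element count
      .moreElementsIf(isVector(0), moreElementsToNextPow2(0));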
operand_type_match m_Reg()
ConstantMatch< APInt > m_ICst(APInt &Cst)
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
BinaryOp_match< LHS, RHS, TargetOpcode::G_PTR_ADD, false > m_GPtrAdd(const LHS &L, const RHS &R)
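The matchers above are how GlobalISel code pattern-matches MIR without writing the def-use traversal by hand. A hedged sketch that recognises a pointer plus a constant offset; Reg and MRI are assumed to be in scope.
  // Hypothetical fragment (matchers live in the MIPatternMatch namespace).
  using namespace MIPatternMatch;
  Register Base;
  APInt Offset;
  if (mi_match(Reg, MRI, m_GPtrAdd(m_Reg(Base), m_ICst(Offset)))) {
    // Reg is defined by G_PTR_ADD Base, <constant>; Offset holds the constant.
  }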
Invariant opcodes: All instruction sets have these as their low opcodes.
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
std::function< bool(const LegalityQuery &)> LegalityPredicate
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
AtomicOrdering
Atomic ordering for LLVM's memory model.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
DWARFExpression::Operation Op
constexpr bool isShiftedInt(int64_t x)
Checks if a signed integer is an N bit number shifted left by S.
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast - Return the argument parameter cast to the specified type.
LLVM_ABI std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its...
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
unsigned Log2(Align A)
Returns the log2 of the alignment.
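A short worked example of the alignment and power-of-two helpers above (the values are illustrative):
  Align A = assumeAligned(8);         // Align(8); assumeAligned(0) would give Align(1)
  uint64_t Padded = alignTo(13, A);   // rounds 13 up to 16
  unsigned Shift = Log2(A);           // 3, since 8 == 1 << 3
  bool Pow2 = isPowerOf2_32(16);      // true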
This struct is a compact representation of a valid (non-zero power of two) alignment.
The LegalityQuery object bundles together all the information that's needed to decide whether a given...
ArrayRef< MemDesc > MMODescrs
Operations which require memory can use this to place requirements on the memory type for each MMO.
This class contains a discriminated union of information about pointers in memory operands,...