LLVM: lib/Target/SPIRV/SPIRVInstructionSelector.cpp Source File (original) (raw)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
32#include "llvm/IR/IntrinsicsSPIRV.h"
35
36#define DEBUG_TYPE "spirv-isel"
37
38using namespace llvm;
41
43 std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;
44
45namespace {
46
47llvm::SPIRV::SelectionControl::SelectionControl
48getSelectionOperandForImm(int Imm) {
49 if (Imm == 2)
50 return SPIRV::SelectionControl::Flatten;
51 if (Imm == 1)
52 return SPIRV::SelectionControl::DontFlatten;
53 if (Imm == 0)
54 return SPIRV::SelectionControl::None;
56}
57
58#define GET_GLOBALISEL_PREDICATE_BITSET
59#include "SPIRVGenGlobalISel.inc"
60#undef GET_GLOBALISEL_PREDICATE_BITSET
61
70
71
72
75
76public:
83
86
87#define GET_GLOBALISEL_PREDICATES_DECL
88#include "SPIRVGenGlobalISel.inc"
89#undef GET_GLOBALISEL_PREDICATES_DECL
90
91#define GET_GLOBALISEL_TEMPORARIES_DECL
92#include "SPIRVGenGlobalISel.inc"
93#undef GET_GLOBALISEL_TEMPORARIES_DECL
94
95private:
99
100
101
103
104
105
108
109 bool selectFirstBitHigh(Register ResVReg, const SPIRVType *ResType,
111
112 bool selectFirstBitLow(Register ResVReg, const SPIRVType *ResType,
114
115 bool selectFirstBitSet16(Register ResVReg, const SPIRVType *ResType,
117 unsigned BitSetOpcode) const;
118
119 bool selectFirstBitSet32(Register ResVReg, const SPIRVType *ResType,
121 unsigned BitSetOpcode) const;
122
123 bool selectFirstBitSet64(Register ResVReg, const SPIRVType *ResType,
125 unsigned BitSetOpcode, bool SwapPrimarySide) const;
126
127 bool selectFirstBitSet64Overflow(Register ResVReg, const SPIRVType *ResType,
129 unsigned BitSetOpcode,
130 bool SwapPrimarySide) const;
131
134
135 bool selectOpWithSrcs(Register ResVReg, const SPIRVType *ResType,
137 unsigned Opcode) const;
138
140 unsigned Opcode) const;
141
144
148
152
157
160 unsigned NegateOpcode = 0) const;
161
162 bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
164
166
167 bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
169
172
175
178
179 bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
181
182 bool selectBuildVector(Register ResVReg, const SPIRVType *ResType,
184 bool selectSplatVector(Register ResVReg, const SPIRVType *ResType,
186
188 unsigned comparisonOpcode, MachineInstr &I) const;
191
196
199
202
203 bool selectOverflowArith(Register ResVReg, const SPIRVType *ResType,
207
208 bool selectIntegerDot(Register ResVReg, const SPIRVType *ResType,
210
211 bool selectIntegerDotExpansion(Register ResVReg, const SPIRVType *ResType,
213
216
219
220 template
221 bool selectDot4AddPacked(Register ResVReg, const SPIRVType *ResType,
223 template
224 bool selectDot4AddPackedExpansion(Register ResVReg, const SPIRVType *ResType,
226
227 bool selectWaveReduceMax(Register ResVReg, const SPIRVType *ResType,
229
230 bool selectWaveReduceMin(Register ResVReg, const SPIRVType *ResType,
232
233 bool selectWaveReduceSum(Register ResVReg, const SPIRVType *ResType,
235
238
241 bool selectSelectDefaultArgs(Register ResVReg, const SPIRVType *ResType,
244 bool IsSigned, unsigned Opcode) const;
246 bool IsSigned) const;
247
250
252 bool IsSigned) const;
253
256
263 bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
267 bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
273
274 bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
276 bool selectAllocaArray(Register ResVReg, const SPIRVType *ResType,
278
281
284
288 MachineInstr &I, CL::OpenCLExtInst CLInst) const;
291 GL::GLSLExtInst GLInst) const;
294 bool selectExtInstForLRound(Register ResVReg, const SPIRVType *ResType,
296 GL::GLSLExtInst GLInst) const;
297 bool selectExtInstForLRound(Register ResVReg, const SPIRVType *ResType,
300
303
306
307 bool selectWaveOpInst(Register ResVReg, const SPIRVType *ResType,
309
310 bool selectWaveActiveCountBits(Register ResVReg, const SPIRVType *ResType,
312
314
315 bool selectHandleFromBinding(Register &ResVReg, const SPIRVType *ResType,
317
318 bool selectCounterHandleFromBinding(Register &ResVReg,
321
322 bool selectReadImageIntrinsic(Register &ResVReg, const SPIRVType *ResType,
324 bool selectImageWriteIntrinsic(MachineInstr &I) const;
325 bool selectResourceGetPointer(Register &ResVReg, const SPIRVType *ResType,
327 bool selectResourceNonUniformIndex(Register &ResVReg,
332 bool selectUpdateCounter(Register &ResVReg, const SPIRVType *ResType,
336 bool selectDerivativeInst(Register ResVReg, const SPIRVType *ResType,
337 MachineInstr &I, const unsigned DPdOpCode) const;
338
339 std::pair<Register, bool>
341 const SPIRVType *ResType = nullptr) const;
342
348
351
353 SPIRV::StorageClass::StorageClass SC) const;
360 SPIRV::StorageClass::StorageClass SC,
366 bool extractSubvector(Register &ResVReg, const SPIRVType *ResType,
368 bool generateImageReadOrFetch(Register &ResVReg, const SPIRVType *ResType,
372 bool loadVec3BuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
375 bool loadBuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
378 bool loadHandleBeforePosition(Register &HandleReg, const SPIRVType *ResType,
380 void decorateUsesAsNonUniform(Register &NonUniformReg) const;
381 void errorIfInstrOutsideShader(MachineInstr &I) const;
382};
383
384bool sampledTypeIsSignedInteger(const llvm::Type *HandleType) {
386 if (TET->getTargetExtName() == "spirv.Image") {
387 return false;
388 }
389 assert(TET->getTargetExtName() == "spirv.SignedImage");
390 return TET->getTypeParameter(0)->isIntegerTy();
391}
392}
393
394#define GET_GLOBALISEL_IMPL
395#include "SPIRVGenGlobalISel.inc"
396#undef GET_GLOBALISEL_IMPL
397
398SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
402 TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
403 MRI(nullptr),
405#include "SPIRVGenGlobalISel.inc"
408#include "SPIRVGenGlobalISel.inc"
410{
411}
412
413void SPIRVInstructionSelector::setupMF(MachineFunction &MF,
420 InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
421}
422
423
// Re-synchronizes vreg LLT types and register classes for a function once
// per MachineFunction: for every ASSIGN_TYPE, the destination register is
// forced to match the source register's type and class.
// NOTE(review): this span is extraction-damaged — the original file's line
// numbers are fused into the text and several body lines (e.g. the MRI and
// Reg declarations used by the first loop) were dropped. Verify against the
// canonical source before editing.
424void SPIRVInstructionSelector::resetVRegsType(MachineFunction &MF) {
// Guard: only reset once per function; HasVRegsReset caches the last MF.
425 if (HasVRegsReset == &MF)
426 return;
427 HasVRegsReset = &MF;
428
430 for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
432 LLT RegType = MRI.getType(Reg);
439 }
// Second pass: make ASSIGN_TYPE dst operands agree with their src operands.
440 for (const auto &MBB : MF) {
441 for (const auto &MI : MBB) {
444 if (MI.getOpcode() != SPIRV::ASSIGN_TYPE)
445 continue;
446
447 Register DstReg = MI.getOperand(0).getReg();
448 LLT DstType = MRI.getType(DstReg);
449 Register SrcReg = MI.getOperand(1).getReg();
450 LLT SrcType = MRI.getType(SrcReg);
// Propagate the source's LLT onto the destination when they disagree.
451 if (DstType != SrcType)
452 MRI.setType(DstReg, MRI.getType(SrcReg));
453
454 const TargetRegisterClass *DstRC = MRI.getRegClassOrNull(DstReg);
455 const TargetRegisterClass *SrcRC = MRI.getRegClassOrNull(SrcReg);
// Only overwrite the dst class when the src actually has one assigned.
456 if (DstRC != SrcRC && SrcRC)
457 MRI.setRegClass(DstReg, SrcRC);
458 }
459 }
460}
461
462
466
468 return true;
469 Visited.insert(OpDef);
470
471 unsigned Opcode = OpDef->getOpcode();
472 switch (Opcode) {
473 case TargetOpcode::G_CONSTANT:
474 case TargetOpcode::G_FCONSTANT:
475 case TargetOpcode::G_IMPLICIT_DEF:
476 return true;
477 case TargetOpcode::G_INTRINSIC:
478 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
479 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
481 Intrinsic::spv_const_composite;
482 case TargetOpcode::G_BUILD_VECTOR:
483 case TargetOpcode::G_SPLAT_VECTOR: {
484 for (unsigned i = OpDef->getNumExplicitDefs(); i < OpDef->getNumOperands();
485 i++) {
489 : nullptr;
490 if (OpNestedDef && (MRI, OpNestedDef, Visited))
491 return false;
492 }
493 return true;
494 case SPIRV::OpConstantTrue:
495 case SPIRV::OpConstantFalse:
496 case SPIRV::OpConstantI:
497 case SPIRV::OpConstantF:
498 case SPIRV::OpConstantComposite:
499 case SPIRV::OpConstantCompositeContinuedINTEL:
500 case SPIRV::OpConstantSampler:
501 case SPIRV::OpConstantNull:
502 case SPIRV::OpUndef:
503 case SPIRV::OpConstantFunctionPointerINTEL:
504 return true;
505 }
506 }
507 return false;
508}
509
510
517
518
519
520
521
522
523
524
525
527 switch (ID) {
528
529 case Intrinsic::spv_all:
530 case Intrinsic::spv_alloca:
531 case Intrinsic::spv_any:
532 case Intrinsic::spv_bitcast:
533 case Intrinsic::spv_const_composite:
534 case Intrinsic::spv_cross:
535 case Intrinsic::spv_degrees:
536 case Intrinsic::spv_distance:
537 case Intrinsic::spv_extractelt:
538 case Intrinsic::spv_extractv:
539 case Intrinsic::spv_faceforward:
540 case Intrinsic::spv_fdot:
541 case Intrinsic::spv_firstbitlow:
542 case Intrinsic::spv_firstbitshigh:
543 case Intrinsic::spv_firstbituhigh:
544 case Intrinsic::spv_frac:
545 case Intrinsic::spv_gep:
546 case Intrinsic::spv_global_offset:
547 case Intrinsic::spv_global_size:
548 case Intrinsic::spv_group_id:
549 case Intrinsic::spv_insertelt:
550 case Intrinsic::spv_insertv:
551 case Intrinsic::spv_isinf:
552 case Intrinsic::spv_isnan:
553 case Intrinsic::spv_lerp:
554 case Intrinsic::spv_length:
555 case Intrinsic::spv_normalize:
556 case Intrinsic::spv_num_subgroups:
557 case Intrinsic::spv_num_workgroups:
558 case Intrinsic::spv_ptrcast:
559 case Intrinsic::spv_radians:
560 case Intrinsic::spv_reflect:
561 case Intrinsic::spv_refract:
562 case Intrinsic::spv_resource_getpointer:
563 case Intrinsic::spv_resource_handlefrombinding:
564 case Intrinsic::spv_resource_handlefromimplicitbinding:
565 case Intrinsic::spv_resource_nonuniformindex:
566 case Intrinsic::spv_rsqrt:
567 case Intrinsic::spv_saturate:
568 case Intrinsic::spv_sdot:
569 case Intrinsic::spv_sign:
570 case Intrinsic::spv_smoothstep:
571 case Intrinsic::spv_step:
572 case Intrinsic::spv_subgroup_id:
573 case Intrinsic::spv_subgroup_local_invocation_id:
574 case Intrinsic::spv_subgroup_max_size:
575 case Intrinsic::spv_subgroup_size:
576 case Intrinsic::spv_thread_id:
577 case Intrinsic::spv_thread_id_in_group:
578 case Intrinsic::spv_udot:
579 case Intrinsic::spv_undef:
580 case Intrinsic::spv_value_md:
581 case Intrinsic::spv_workgroup_size:
582 return false;
583 default:
584 return true;
585 }
586}
587
588
589
590
592 switch (Opcode) {
593 case SPIRV::OpTypeVoid:
594 case SPIRV::OpTypeBool:
595 case SPIRV::OpTypeInt:
596 case SPIRV::OpTypeFloat:
597 case SPIRV::OpTypeVector:
598 case SPIRV::OpTypeMatrix:
599 case SPIRV::OpTypeImage:
600 case SPIRV::OpTypeSampler:
601 case SPIRV::OpTypeSampledImage:
602 case SPIRV::OpTypeArray:
603 case SPIRV::OpTypeRuntimeArray:
604 case SPIRV::OpTypeStruct:
605 case SPIRV::OpTypeOpaque:
606 case SPIRV::OpTypePointer:
607 case SPIRV::OpTypeFunction:
608 case SPIRV::OpTypeEvent:
609 case SPIRV::OpTypeDeviceEvent:
610 case SPIRV::OpTypeReserveId:
611 case SPIRV::OpTypeQueue:
612 case SPIRV::OpTypePipe:
613 case SPIRV::OpTypeForwardPointer:
614 case SPIRV::OpTypePipeStorage:
615 case SPIRV::OpTypeNamedBarrier:
616 case SPIRV::OpTypeAccelerationStructureNV:
617 case SPIRV::OpTypeCooperativeMatrixNV:
618 case SPIRV::OpTypeCooperativeMatrixKHR:
619 return true;
620 default:
621 return false;
622 }
623}
624
626
627
628 if (MI.getNumDefs() == 0)
629 return false;
630
631 for (const auto &MO : MI.all_defs()) {
633 if (Reg.isPhysical()) {
634 LLVM_DEBUG(dbgs() << "Not dead: def of physical register " << Reg);
635 return false;
636 }
637 for (const auto &UseMI : MRI.use_nodbg_instructions(Reg)) {
638 if (UseMI.getOpcode() != SPIRV::OpName) {
640 return false;
641 }
642 }
643 }
644
645 if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE || MI.isFakeUse() ||
646 MI.isLifetimeMarker()) {
649 << "Not dead: Opcode is LOCAL_ESCAPE, fake use, or lifetime marker.\n");
650 return false;
651 }
652 if (MI.isPHI()) {
653 LLVM_DEBUG(dbgs() << "Dead: Phi instruction with no uses.\n");
654 return true;
655 }
656
657
658
659
660 if (MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
661 MI.getOpcode() == TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS) {
664 LLVM_DEBUG(dbgs() << "Dead: Intrinsic with no real side effects.\n");
665 return true;
666 }
667 }
668
669 if (MI.mayStore() || MI.isCall() ||
670 (MI.mayLoad() && MI.hasOrderedMemoryRef()) || MI.isPosition() ||
671 MI.isDebugInstr() || MI.isTerminator() || MI.isJumpTableDebugInfo()) {
672 LLVM_DEBUG(dbgs() << "Not dead: instruction has side effects.\n");
673 return false;
674 }
675
677
678 LLVM_DEBUG(dbgs() << "Dead: Generic opcode with no uses.\n");
679 return true;
680 }
681
683 LLVM_DEBUG(dbgs() << "Dead: known opcode with no side effects\n");
684 return true;
685 }
686
687 return false;
688}
689
// Erases the OpName pseudo-instructions that still reference the defs of an
// instruction about to be deleted as dead. Asserts that OpName is the only
// remaining kind of user.
// NOTE(review): extraction-damaged span — the original line numbers are
// fused into the text and a few body lines (the def-register extraction and
// the push into UselessOpNames) were dropped. Confirm against upstream.
690void SPIRVInstructionSelector::removeOpNamesForDeadMI(MachineInstr &MI) const {
691
692 for (const auto &MO : MI.all_defs()) {
695 continue;
// Collect first, erase second: erasing while iterating use_nodbg_instructions
// would invalidate the iterator.
696 SmallVector<MachineInstr *, 4> UselessOpNames;
697 for (MachineInstr &UseMI : MRI->use_nodbg_instructions(Reg)) {
698 assert(UseMI.getOpcode() == SPIRV::OpName &&
699 "There is still a use of the dead function.");
701 }
702 for (MachineInstr *OpNameMI : UselessOpNames) {
704 OpNameMI->eraseFromParent();
705 }
706 }
707}
708
// Deletes a dead instruction: first strips any OpName users of its defs,
// then erases the instruction itself.
// NOTE(review): interior lines of this function were dropped by the
// extraction (original lines 710-711); the visible calls are intact.
709void SPIRVInstructionSelector::removeDeadInstruction(MachineInstr &MI) const {
712 removeOpNamesForDeadMI(MI);
713 MI.eraseFromParent();
714}
715
716bool SPIRVInstructionSelector::select(MachineInstr &I) {
717 resetVRegsType(*I.getParent()->getParent());
718
719 assert(I.getParent() && "Instruction should be in a basic block!");
720 assert(I.getParent()->getParent() && "Instruction should be in a function!");
721
722 LLVM_DEBUG(dbgs() << "Checking if instruction is dead: " << I;);
725 removeDeadInstruction(I);
726 return true;
727 }
728
730
732 if (Opcode == SPIRV::ASSIGN_TYPE) {
733 Register DstReg = I.getOperand(0).getReg();
734 Register SrcReg = I.getOperand(1).getReg();
735 auto *Def = MRI->getVRegDef(SrcReg);
737 Def->getOpcode() != TargetOpcode::G_CONSTANT &&
738 Def->getOpcode() != TargetOpcode::G_FCONSTANT) {
739 bool Res = false;
740 if (Def->getOpcode() == TargetOpcode::G_SELECT) {
741 Register SelectDstReg = Def->getOperand(0).getReg();
743 *Def);
745 Def->removeFromParent();
746 MRI->replaceRegWith(DstReg, SelectDstReg);
748 I.removeFromParent();
749 } else
750 Res = selectImpl(I, *CoverageInfo);
752 if (!Res && Def->getOpcode() != TargetOpcode::G_CONSTANT) {
753 dbgs() << "Unexpected pattern in ASSIGN_TYPE.\nInstruction: ";
755 }
756 });
757 assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
758 if (Res) {
760 DeadMIs.insert(Def);
761 return Res;
762 }
763 }
764 MRI->setRegClass(SrcReg, MRI->getRegClass(DstReg));
765 MRI->replaceRegWith(SrcReg, DstReg);
767 I.removeFromParent();
768 return true;
769 } else if (I.getNumDefs() == 1) {
770
772 }
774 }
775
776 if (DeadMIs.contains(&I)) {
777
778
779 LLVM_DEBUG(dbgs() << "Instruction is folded and dead.\n");
780 removeDeadInstruction(I);
781 return true;
782 }
783
784 if (I.getNumOperands() != I.getNumExplicitOperands()) {
785 LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
786 return false;
787 }
788
789
790
791 bool HasDefs = I.getNumDefs() > 0;
792 Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
794 assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
795 I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
796 if (spvSelect(ResVReg, ResType, I)) {
797 if (HasDefs)
798 for (unsigned i = 0; i < I.getNumDefs(); ++i)
801 I.removeFromParent();
802 return true;
803 }
804 return false;
805}
806
808 switch (Opcode) {
809 case TargetOpcode::G_CONSTANT:
810 case TargetOpcode::G_FCONSTANT:
811 return false;
812 case TargetOpcode::G_SADDO:
813 case TargetOpcode::G_SSUBO:
814 return true;
815 }
817}
818
// Emits a COPY from SrcReg to DestReg before I, first aligning DestReg's
// register class with SrcReg's (when SrcReg has one) so the copy is legal.
// NOTE(review): extraction-damaged — the builder's .addDef/.addUse operand
// lines (original 827-829) were dropped from this copy; the visible logic
// mirrors the class-propagation pattern used by resetVRegsType.
819bool SPIRVInstructionSelector::BuildCOPY(Register DestReg, Register SrcReg,
820 MachineInstr &I) const {
821 const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(DestReg);
822 const TargetRegisterClass *SrcRC = MRI->getRegClassOrNull(SrcReg);
823 if (DstRC != SrcRC && SrcRC)
824 MRI->setRegClass(DestReg, SrcRC);
825 return BuildMI(*I.getParent(), I, I.getDebugLoc(),
826 TII.get(TargetOpcode::COPY))
830}
831
832bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
834 MachineInstr &I) const {
835 const unsigned Opcode = I.getOpcode();
837 return selectImpl(I, *CoverageInfo);
838 switch (Opcode) {
839 case TargetOpcode::G_CONSTANT:
840 case TargetOpcode::G_FCONSTANT:
841 return selectConst(ResVReg, ResType, I);
842 case TargetOpcode::G_GLOBAL_VALUE:
843 return selectGlobalValue(ResVReg, I);
844 case TargetOpcode::G_IMPLICIT_DEF:
845 return selectOpUndef(ResVReg, ResType, I);
846 case TargetOpcode::G_FREEZE:
847 return selectFreeze(ResVReg, ResType, I);
848
849 case TargetOpcode::G_INTRINSIC:
850 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
851 case TargetOpcode::G_INTRINSIC_CONVERGENT:
852 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
853 return selectIntrinsic(ResVReg, ResType, I);
854 case TargetOpcode::G_BITREVERSE:
855 return selectBitreverse(ResVReg, ResType, I);
856
857 case TargetOpcode::G_BUILD_VECTOR:
858 return selectBuildVector(ResVReg, ResType, I);
859 case TargetOpcode::G_SPLAT_VECTOR:
860 return selectSplatVector(ResVReg, ResType, I);
861
862 case TargetOpcode::G_SHUFFLE_VECTOR: {
863 MachineBasicBlock &BB = *I.getParent();
864 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
867 .addUse(I.getOperand(1).getReg())
868 .addUse(I.getOperand(2).getReg());
869 for (auto V : I.getOperand(3).getShuffleMask())
872 }
873 case TargetOpcode::G_MEMMOVE:
874 case TargetOpcode::G_MEMCPY:
875 case TargetOpcode::G_MEMSET:
876 return selectMemOperation(ResVReg, I);
877
878 case TargetOpcode::G_ICMP:
879 return selectICmp(ResVReg, ResType, I);
880 case TargetOpcode::G_FCMP:
881 return selectFCmp(ResVReg, ResType, I);
882
883 case TargetOpcode::G_FRAME_INDEX:
884 return selectFrameIndex(ResVReg, ResType, I);
885
886 case TargetOpcode::G_LOAD:
887 return selectLoad(ResVReg, ResType, I);
888 case TargetOpcode::G_STORE:
889 return selectStore(I);
890
891 case TargetOpcode::G_BR:
892 return selectBranch(I);
893 case TargetOpcode::G_BRCOND:
894 return selectBranchCond(I);
895
896 case TargetOpcode::G_PHI:
897 return selectPhi(ResVReg, ResType, I);
898
899 case TargetOpcode::G_FPTOSI:
900 return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
901 case TargetOpcode::G_FPTOUI:
902 return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);
903
904 case TargetOpcode::G_FPTOSI_SAT:
905 return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
906 case TargetOpcode::G_FPTOUI_SAT:
907 return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);
908
909 case TargetOpcode::G_SITOFP:
910 return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
911 case TargetOpcode::G_UITOFP:
912 return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);
913
914 case TargetOpcode::G_CTPOP:
915 return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
916 case TargetOpcode::G_SMIN:
917 return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
918 case TargetOpcode::G_UMIN:
919 return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);
920
921 case TargetOpcode::G_SMAX:
922 return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
923 case TargetOpcode::G_UMAX:
924 return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);
925
926 case TargetOpcode::G_SCMP:
927 return selectSUCmp(ResVReg, ResType, I, true);
928 case TargetOpcode::G_UCMP:
929 return selectSUCmp(ResVReg, ResType, I, false);
930 case TargetOpcode::G_LROUND:
931 case TargetOpcode::G_LLROUND: {
933 MRI->createVirtualRegister(MRI->getRegClass(ResVReg), "lround");
934 MRI->setRegClass(regForLround, &SPIRV::iIDRegClass);
936 regForLround, *(I.getParent()->getParent()));
937 selectExtInstForLRound(regForLround, GR.getSPIRVTypeForVReg(regForLround),
938 I, CL::round, GL::Round);
939 MachineBasicBlock &BB = *I.getParent();
940 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConvertFToS))
943 .addUse(regForLround);
945 }
946 case TargetOpcode::G_STRICT_FMA:
947 case TargetOpcode::G_FMA:
948 return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);
949
950 case TargetOpcode::G_STRICT_FLDEXP:
951 return selectExtInst(ResVReg, ResType, I, CL::ldexp);
952
953 case TargetOpcode::G_FPOW:
954 return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
955 case TargetOpcode::G_FPOWI:
956 return selectExtInst(ResVReg, ResType, I, CL::pown);
957
958 case TargetOpcode::G_FEXP:
959 return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
960 case TargetOpcode::G_FEXP2:
961 return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);
962 case TargetOpcode::G_FMODF:
963 return selectModf(ResVReg, ResType, I);
964
965 case TargetOpcode::G_FLOG:
966 return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
967 case TargetOpcode::G_FLOG2:
968 return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
969 case TargetOpcode::G_FLOG10:
970 return selectLog10(ResVReg, ResType, I);
971
972 case TargetOpcode::G_FABS:
973 return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
974 case TargetOpcode::G_ABS:
975 return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);
976
977 case TargetOpcode::G_FMINNUM:
978 case TargetOpcode::G_FMINIMUM:
979 return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::NMin);
980 case TargetOpcode::G_FMAXNUM:
981 case TargetOpcode::G_FMAXIMUM:
982 return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::NMax);
983
984 case TargetOpcode::G_FCOPYSIGN:
985 return selectExtInst(ResVReg, ResType, I, CL::copysign);
986
987 case TargetOpcode::G_FCEIL:
988 return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
989 case TargetOpcode::G_FFLOOR:
990 return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);
991
992 case TargetOpcode::G_FCOS:
993 return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
994 case TargetOpcode::G_FSIN:
995 return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);
996 case TargetOpcode::G_FTAN:
997 return selectExtInst(ResVReg, ResType, I, CL::tan, GL::Tan);
998 case TargetOpcode::G_FACOS:
999 return selectExtInst(ResVReg, ResType, I, CL::acos, GL::Acos);
1000 case TargetOpcode::G_FASIN:
1001 return selectExtInst(ResVReg, ResType, I, CL::asin, GL::Asin);
1002 case TargetOpcode::G_FATAN:
1003 return selectExtInst(ResVReg, ResType, I, CL::atan, GL::Atan);
1004 case TargetOpcode::G_FATAN2:
1005 return selectExtInst(ResVReg, ResType, I, CL::atan2, GL::Atan2);
1006 case TargetOpcode::G_FCOSH:
1007 return selectExtInst(ResVReg, ResType, I, CL::cosh, GL::Cosh);
1008 case TargetOpcode::G_FSINH:
1009 return selectExtInst(ResVReg, ResType, I, CL::sinh, GL::Sinh);
1010 case TargetOpcode::G_FTANH:
1011 return selectExtInst(ResVReg, ResType, I, CL::tanh, GL::Tanh);
1012
1013 case TargetOpcode::G_STRICT_FSQRT:
1014 case TargetOpcode::G_FSQRT:
1015 return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);
1016
1017 case TargetOpcode::G_CTTZ:
1018 case TargetOpcode::G_CTTZ_ZERO_UNDEF:
1019 return selectExtInst(ResVReg, ResType, I, CL::ctz);
1020 case TargetOpcode::G_CTLZ:
1021 case TargetOpcode::G_CTLZ_ZERO_UNDEF:
1022 return selectExtInst(ResVReg, ResType, I, CL::clz);
1023
1024 case TargetOpcode::G_INTRINSIC_ROUND:
1025 return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
1026 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
1027 return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
1028 case TargetOpcode::G_INTRINSIC_TRUNC:
1029 return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
1030 case TargetOpcode::G_FRINT:
1031 case TargetOpcode::G_FNEARBYINT:
1032 return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
1033
1034 case TargetOpcode::G_SMULH:
1035 return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
1036 case TargetOpcode::G_UMULH:
1037 return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);
1038
1039 case TargetOpcode::G_SADDSAT:
1040 return selectExtInst(ResVReg, ResType, I, CL::s_add_sat);
1041 case TargetOpcode::G_UADDSAT:
1042 return selectExtInst(ResVReg, ResType, I, CL::u_add_sat);
1043 case TargetOpcode::G_SSUBSAT:
1044 return selectExtInst(ResVReg, ResType, I, CL::s_sub_sat);
1045 case TargetOpcode::G_USUBSAT:
1046 return selectExtInst(ResVReg, ResType, I, CL::u_sub_sat);
1047
1048 case TargetOpcode::G_FFREXP:
1049 return selectFrexp(ResVReg, ResType, I);
1050
1051 case TargetOpcode::G_UADDO:
1052 return selectOverflowArith(ResVReg, ResType, I,
1053 ResType->getOpcode() == SPIRV::OpTypeVector
1054 ? SPIRV::OpIAddCarryV
1055 : SPIRV::OpIAddCarryS);
1056 case TargetOpcode::G_USUBO:
1057 return selectOverflowArith(ResVReg, ResType, I,
1058 ResType->getOpcode() == SPIRV::OpTypeVector
1059 ? SPIRV::OpISubBorrowV
1060 : SPIRV::OpISubBorrowS);
1061 case TargetOpcode::G_UMULO:
1062 return selectOverflowArith(ResVReg, ResType, I, SPIRV::OpUMulExtended);
1063 case TargetOpcode::G_SMULO:
1064 return selectOverflowArith(ResVReg, ResType, I, SPIRV::OpSMulExtended);
1065
1066 case TargetOpcode::G_SEXT:
1067 return selectExt(ResVReg, ResType, I, true);
1068 case TargetOpcode::G_ANYEXT:
1069 case TargetOpcode::G_ZEXT:
1070 return selectExt(ResVReg, ResType, I, false);
1071 case TargetOpcode::G_TRUNC:
1072 return selectTrunc(ResVReg, ResType, I);
1073 case TargetOpcode::G_FPTRUNC:
1074 case TargetOpcode::G_FPEXT:
1075 return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);
1076
1077 case TargetOpcode::G_PTRTOINT:
1078 return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
1079 case TargetOpcode::G_INTTOPTR:
1080 return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
1081 case TargetOpcode::G_BITCAST:
1082 return selectBitcast(ResVReg, ResType, I);
1083 case TargetOpcode::G_ADDRSPACE_CAST:
1084 return selectAddrSpaceCast(ResVReg, ResType, I);
1085 case TargetOpcode::G_PTR_ADD: {
1086
1087 assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
1088 Register GV = I.getOperand(1).getReg();
1090 (void)II;
1091 assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
1092 (*II).getOpcode() == TargetOpcode::COPY ||
1093 (*II).getOpcode() == SPIRV::OpVariable) &&
1095
1096 bool IsGVInit = false;
1098 UseIt = MRI->use_instr_begin(I.getOperand(0).getReg()),
1099 UseEnd = MRI->use_instr_end();
1100 UseIt != UseEnd; UseIt = std::next(UseIt)) {
1101 if ((*UseIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
1102 (*UseIt).getOpcode() == SPIRV::OpVariable) {
1103 IsGVInit = true;
1104 break;
1105 }
1106 }
1107 MachineBasicBlock &BB = *I.getParent();
1108 if (!IsGVInit) {
1112 if (GVPointeeType && ResPointeeType && GVPointeeType != ResPointeeType) {
1113
1114
1115 Register NewVReg = MRI->createGenericVirtualRegister(MRI->getType(GV));
1116 MRI->setRegClass(NewVReg, MRI->getRegClass(GV));
1117
1118
1119
1120
1121
1122
1125 "incompatible result and operand types in a bitcast");
1127 MachineInstrBuilder MIB =
1128 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitcast))
1135 ? SPIRV::OpInBoundsAccessChain
1136 : SPIRV::OpInBoundsPtrAccessChain))
1140 .addUse(I.getOperand(2).getReg())
1142 } else {
1143 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
1147 static_cast<uint32_t>(SPIRV::Opcode::InBoundsPtrAccessChain))
1149 .addUse(I.getOperand(2).getReg())
1151 }
1152 }
1153
1154
1155
1157 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
1160 .addImm(static_cast<uint32_t>(
1161 SPIRV::Opcode::InBoundsPtrAccessChain))
1164 .addUse(I.getOperand(2).getReg());
1166 }
1167
1168 case TargetOpcode::G_ATOMICRMW_OR:
1169 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
1170 case TargetOpcode::G_ATOMICRMW_ADD:
1171 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
1172 case TargetOpcode::G_ATOMICRMW_AND:
1173 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
1174 case TargetOpcode::G_ATOMICRMW_MAX:
1175 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
1176 case TargetOpcode::G_ATOMICRMW_MIN:
1177 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
1178 case TargetOpcode::G_ATOMICRMW_SUB:
1179 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
1180 case TargetOpcode::G_ATOMICRMW_XOR:
1181 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
1182 case TargetOpcode::G_ATOMICRMW_UMAX:
1183 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
1184 case TargetOpcode::G_ATOMICRMW_UMIN:
1185 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
1186 case TargetOpcode::G_ATOMICRMW_XCHG:
1187 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
1188 case TargetOpcode::G_ATOMIC_CMPXCHG:
1189 return selectAtomicCmpXchg(ResVReg, ResType, I);
1190
1191 case TargetOpcode::G_ATOMICRMW_FADD:
1192 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
1193 case TargetOpcode::G_ATOMICRMW_FSUB:
1194
1195 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
1196 ResType->getOpcode() == SPIRV::OpTypeVector
1197 ? SPIRV::OpFNegateV
1198 : SPIRV::OpFNegate);
1199 case TargetOpcode::G_ATOMICRMW_FMIN:
1200 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
1201 case TargetOpcode::G_ATOMICRMW_FMAX:
1202 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);
1203
1204 case TargetOpcode::G_FENCE:
1205 return selectFence(I);
1206
1207 case TargetOpcode::G_STACKSAVE:
1208 return selectStackSave(ResVReg, ResType, I);
1209 case TargetOpcode::G_STACKRESTORE:
1210 return selectStackRestore(I);
1211
1212 case TargetOpcode::G_UNMERGE_VALUES:
1214
1215
1216
1217
1218 case TargetOpcode::G_TRAP:
1219 case TargetOpcode::G_UBSANTRAP:
1220 case TargetOpcode::DBG_LABEL:
1221 return true;
1222 case TargetOpcode::G_DEBUGTRAP:
1223 return selectDebugTrap(ResVReg, ResType, I);
1224
1225 default:
1226 return false;
1227 }
1228}
1229
// Lowers G_DEBUGTRAP. Selects an opcode (OpNop here; presumably a real trap
// opcode on capable targets — original lines 1235-1236 that chose/emitted it
// were dropped by the extraction, so confirm against upstream).
1230bool SPIRVInstructionSelector::selectDebugTrap(Register ResVReg,
1232 MachineInstr &I) const {
1233 unsigned Opcode = SPIRV::OpNop;
1234 MachineBasicBlock &BB = *I.getParent();
1237}
1238
1239bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
1241 MachineInstr &I,
1242 GL::GLSLExtInst GLInst) const {
1244 SPIRV::InstructionSet::InstructionSet::GLSL_std_450)) {
1245 std::string DiagMsg;
1246 raw_string_ostream OS(DiagMsg);
1247 I.print(OS, true, false, false, false);
1248 DiagMsg += " is only supported with the GLSL extended instruction set.\n";
1250 }
1251 return selectExtInst(ResVReg, ResType, I,
1252 {{SPIRV::InstructionSet::GLSL_std_450, GLInst}});
1253}
1254
// Convenience overload: select an extended instruction that exists only in
// the OpenCL.std extended instruction set, by forwarding to the list-based
// overload with a single-entry list.
1255bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
1257 MachineInstr &I,
1258 CL::OpenCLExtInst CLInst) const {
1259 return selectExtInst(ResVReg, ResType, I,
1260 {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
1261}
1262
// Convenience overload: the instruction exists in both OpenCL.std and
// GLSL.std.450; forward both candidates so the list-based overload can pick
// whichever set the module can use.
1263bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
1265 MachineInstr &I,
1266 CL::OpenCLExtInst CLInst,
1267 GL::GLSLExtInst GLInst) const {
1268 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
1269 {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
1270 return selectExtInst(ResVReg, ResType, I, ExtInsts);
1271}
1272
1273bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
1275 MachineInstr &I,
1277
1278 for (const auto &Ex : Insts) {
1279 SPIRV::InstructionSet::InstructionSet Set = Ex.first;
1280 uint32_t Opcode = Ex.second;
1282 MachineBasicBlock &BB = *I.getParent();
1283 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
1286 .addImm(static_cast<uint32_t>(Set))
1289 const unsigned NumOps = I.getNumOperands();
1290 unsigned Index = 1;
1291 if (Index < NumOps &&
1292 I.getOperand(Index).getType() ==
1293 MachineOperand::MachineOperandType::MO_IntrinsicID)
1296 MIB.add(I.getOperand(Index));
1298 }
1299 }
1300 return false;
1301}
// Convenience overload for the lround/llround path: builds the two-entry
// candidate list (OpenCL.std and GLSL.std.450) and forwards to the
// list-based selectExtInstForLRound.
1302bool SPIRVInstructionSelector::selectExtInstForLRound(
1304 CL::OpenCLExtInst CLInst, GL::GLSLExtInst GLInst) const {
1305 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
1306 {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
1307 return selectExtInstForLRound(ResVReg, ResType, I, ExtInsts);
1308}
1309
1310bool SPIRVInstructionSelector::selectExtInstForLRound(
1313 for (const auto &Ex : Insts) {
1314 SPIRV::InstructionSet::InstructionSet Set = Ex.first;
1315 uint32_t Opcode = Ex.second;
1317 MachineBasicBlock &BB = *I.getParent();
1318 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
1321 .addImm(static_cast<uint32_t>(Set))
1323 const unsigned NumOps = I.getNumOperands();
1324 unsigned Index = 1;
1325 if (Index < NumOps &&
1326 I.getOperand(Index).getType() ==
1327 MachineOperand::MachineOperandType::MO_IntrinsicID)
1330 MIB.add(I.getOperand(Index));
1332 return true;
1333 }
1334 }
1335 return false;
1336}
1337
1338bool SPIRVInstructionSelector::selectFrexp(Register ResVReg,
1340 MachineInstr &I) const {
1341 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CL::frexp},
1342 {SPIRV::InstructionSet::GLSL_std_450, GL::Frexp}};
1343 for (const auto &Ex : ExtInsts) {
1344 SPIRV::InstructionSet::InstructionSet Set = Ex.first;
1345 uint32_t Opcode = Ex.second;
1347 continue;
1348
1349 MachineIRBuilder MIRBuilder(I);
1352 PointeeTy, MIRBuilder, SPIRV::StorageClass::Function);
1355
1357 auto MIB = BuildMI(*It->getParent(), It, It->getDebugLoc(),
1358 TII.get(SPIRV::OpVariable))
1359 .addDef(PointerVReg)
1361 .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
1363
1364 MIB = MIB &
1365 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
1368 .addImm(static_cast<uint32_t>(Ex.first))
1371 .addUse(PointerVReg)
1373
1374 MIB = MIB &
1375 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
1376 .addDef(I.getOperand(1).getReg())
1378 .addUse(PointerVReg)
1380 return MIB;
1381 }
1382 return false;
1383}
1384
1385bool SPIRVInstructionSelector::selectOpWithSrcs(Register ResVReg,
1387 MachineInstr &I,
1388 std::vector Srcs,
1389 unsigned Opcode) const {
1390 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1393 for (Register SReg : Srcs) {
1395 }
1397}
1398
1399bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
1401 MachineInstr &I,
1402 unsigned Opcode) const {
1404 Register SrcReg = I.getOperand(1).getReg();
1405 bool IsGV = false;
1407 MRI->def_instr_begin(SrcReg);
1408 DefIt != MRI->def_instr_end(); DefIt = std::next(DefIt)) {
1409 unsigned DefOpCode = DefIt->getOpcode();
1410 if (DefOpCode == SPIRV::ASSIGN_TYPE) {
1411
1412
1413 if (auto *VRD = getVRegDef(*MRI, DefIt->getOperand(1).getReg()))
1414 DefOpCode = VRD->getOpcode();
1415 }
1416 if (DefOpCode == TargetOpcode::G_GLOBAL_VALUE ||
1417 DefOpCode == TargetOpcode::G_CONSTANT ||
1418 DefOpCode == SPIRV::OpVariable || DefOpCode == SPIRV::OpConstantI) {
1419 IsGV = true;
1420 break;
1421 }
1422 }
1423 if (IsGV) {
1424 uint32_t SpecOpcode = 0;
1425 switch (Opcode) {
1426 case SPIRV::OpConvertPtrToU:
1427 SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertPtrToU);
1428 break;
1429 case SPIRV::OpConvertUToPtr:
1430 SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertUToPtr);
1431 break;
1432 }
1433 if (SpecOpcode)
1434 return BuildMI(*I.getParent(), I, I.getDebugLoc(),
1435 TII.get(SPIRV::OpSpecConstantOp))
1441 }
1442 }
1443 return selectOpWithSrcs(ResVReg, ResType, I, {I.getOperand(1).getReg()},
1444 Opcode);
1445}
1446
1447bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
1449 MachineInstr &I) const {
1450 Register OpReg = I.getOperand(1).getReg();
1453 report_fatal_error("incompatible result and operand types in a bitcast");
1454 return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
1455}
1456
1461 uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
1462 if (MemOp->isVolatile())
1463 SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1464 if (MemOp->isNonTemporal())
1465 SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1466 if (MemOp->getAlign().value())
1467 SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);
1468
1469 [[maybe_unused]] MachineInstr *AliasList = nullptr;
1470 [[maybe_unused]] MachineInstr *NoAliasList = nullptr;
1472 static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
1473 if (ST->canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing)) {
1474 if (auto *MD = MemOp->getAAInfo().Scope) {
1476 if (AliasList)
1477 SpvMemOp |=
1478 static_cast<uint32_t>(SPIRV::MemoryOperand::AliasScopeINTELMask);
1479 }
1480 if (auto *MD = MemOp->getAAInfo().NoAlias) {
1482 if (NoAliasList)
1483 SpvMemOp |=
1484 static_cast<uint32_t>(SPIRV::MemoryOperand::NoAliasINTELMask);
1485 }
1486 }
1487
1488 if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
1489 MIB.addImm(SpvMemOp);
1490 if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
1492 if (AliasList)
1494 if (NoAliasList)
1496 }
1497}
1498
1500 uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
1502 SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1504 SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1505
1506 if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
1507 MIB.addImm(SpvMemOp);
1508}
1509
1510bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
1512 MachineInstr &I) const {
1514 Register Ptr = I.getOperand(1 + OpOffset).getReg();
1515
1518 if (IntPtrDef &&
1519 IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
1520 Register HandleReg = IntPtrDef->getOperand(2).getReg();
1522 if (HandleType->getOpcode() == SPIRV::OpTypeImage) {
1524 MRI->createVirtualRegister(MRI->getRegClass(HandleReg));
1526 if (!loadHandleBeforePosition(NewHandleReg, HandleType, *HandleDef, I)) {
1527 return false;
1528 }
1529
1530 Register IdxReg = IntPtrDef->getOperand(3).getReg();
1531 return generateImageReadOrFetch(ResVReg, ResType, NewHandleReg, IdxReg,
1533 }
1534 }
1535
1536 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
1540 if (.getNumMemOperands()) {
1541 assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1542 I.getOpcode() ==
1543 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1545 } else {
1546 MachineIRBuilder MIRBuilder(I);
1548 }
1550}
1551
1552bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
1554 Register StoreVal = I.getOperand(0 + OpOffset).getReg();
1555 Register Ptr = I.getOperand(1 + OpOffset).getReg();
1556
1559 if (IntPtrDef &&
1560 IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
1561 Register HandleReg = IntPtrDef->getOperand(2).getReg();
1563 MRI->createVirtualRegister(MRI->getRegClass(HandleReg));
1566 if (!loadHandleBeforePosition(NewHandleReg, HandleType, *HandleDef, I)) {
1567 return false;
1568 }
1569
1570 Register IdxReg = IntPtrDef->getOperand(3).getReg();
1571 if (HandleType->getOpcode() == SPIRV::OpTypeImage) {
1572 auto BMI = BuildMI(*I.getParent(), I, I.getDebugLoc(),
1573 TII.get(SPIRV::OpImageWrite))
1574 .addUse(NewHandleReg)
1577
1579 if (sampledTypeIsSignedInteger(LLVMHandleType))
1580 BMI.addImm(0x1000);
1581
1582 return BMI.constrainAllUses(TII, TRI, RBI);
1583 }
1584 }
1585
1586 MachineBasicBlock &BB = *I.getParent();
1587 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
1590 if (.getNumMemOperands()) {
1591 assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1592 I.getOpcode() ==
1593 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1595 } else {
1596 MachineIRBuilder MIRBuilder(I);
1598 }
1600}
1601
1602bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
1604 MachineInstr &I) const {
1605 if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1607 "llvm.stacksave intrinsic: this instruction requires the following "
1608 "SPIR-V extension: SPV_INTEL_variable_length_array",
1609 false);
1610 MachineBasicBlock &BB = *I.getParent();
1611 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
1615}
1616
1617bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
1618 if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1620 "llvm.stackrestore intrinsic: this instruction requires the following "
1621 "SPIR-V extension: SPV_INTEL_variable_length_array",
1622 false);
1623 if (.getOperand(0).isReg())
1624 return false;
1625 MachineBasicBlock &BB = *I.getParent();
1626 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
1627 .addUse(I.getOperand(0).getReg())
1629}
1630
1632SPIRVInstructionSelector::getOrCreateMemSetGlobal(MachineInstr &I) const {
1633 MachineIRBuilder MIRBuilder(I);
1634 assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
1635
1636
1639 Type *LLVMArrTy =
1641 GlobalVariable *GV = new GlobalVariable(*CurFunction.getParent(), LLVMArrTy,
1644
1645 Type *ValTy = Type::getInt8Ty(I.getMF()->getFunction().getContext());
1646 Type *ArrTy = ArrayType::get(ValTy, Num);
1648 ArrTy, MIRBuilder, SPIRV::StorageClass::UniformConstant);
1649
1651 ArrTy, MIRBuilder, SPIRV::AccessQualifier::None, false);
1652
1655
1657 auto MIBVar =
1658 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
1661 .addImm(SPIRV::StorageClass::UniformConstant)
1663 if (!MIBVar.constrainAllUses(TII, TRI, RBI))
1665
1666 GR.add(GV, MIBVar);
1668
1670 return VarReg;
1671}
1672
1673bool SPIRVInstructionSelector::selectCopyMemory(MachineInstr &I,
1675 MachineBasicBlock &BB = *I.getParent();
1676 Register DstReg = I.getOperand(0).getReg();
1680 report_fatal_error("OpCopyMemory requires operands to have the same type");
1681 uint64_t CopySize = getIConstVal(I.getOperand(2).getReg(), MRI);
1684 if (!LLVMPointeeTy)
1686 "Unable to determine pointee type size for OpCopyMemory");
1687 const DataLayout &DL = I.getMF()->getFunction().getDataLayout();
1688 if (CopySize != DL.getTypeStoreSize(const_cast<Type *>(LLVMPointeeTy)))
1690 "OpCopyMemory requires the size to match the pointee type size");
1691 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemory))
1694 if (I.getNumMemOperands()) {
1695 MachineIRBuilder MIRBuilder(I);
1697 }
1699}
1700
1701bool SPIRVInstructionSelector::selectCopyMemorySized(MachineInstr &I,
1703 MachineBasicBlock &BB = *I.getParent();
1704 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
1705 .addUse(I.getOperand(0).getReg())
1707 .addUse(I.getOperand(2).getReg());
1708 if (I.getNumMemOperands()) {
1709 MachineIRBuilder MIRBuilder(I);
1711 }
1713}
1714
1715bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
1716 MachineInstr &I) const {
1717 Register SrcReg = I.getOperand(1).getReg();
1719 if (I.getOpcode() == TargetOpcode::G_MEMSET) {
1720 Register VarReg = getOrCreateMemSetGlobal(I);
1722 return false;
1723 Type *ValTy = Type::getInt8Ty(I.getMF()->getFunction().getContext());
1725 ValTy, I, SPIRV::StorageClass::UniformConstant);
1726 SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
1727 Result &= selectOpWithSrcs(SrcReg, SourceTy, I, {VarReg}, SPIRV::OpBitcast);
1728 }
1730 Result &= selectCopyMemory(I, SrcReg);
1731 } else {
1732 Result &= selectCopyMemorySized(I, SrcReg);
1733 }
1734 if (ResVReg.isValid() && ResVReg != I.getOperand(0).getReg())
1735 Result &= BuildCOPY(ResVReg, I.getOperand(0).getReg(), I);
1737}
1738
1739bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
1741 MachineInstr &I,
1742 unsigned NewOpcode,
1743 unsigned NegateOpcode) const {
1745 assert(I.hasOneMemOperand());
1746 const MachineMemOperand *MemOp = *I.memoperands_begin();
1749 auto ScopeConstant = buildI32Constant(Scope, I);
1750 Register ScopeReg = ScopeConstant.first;
1751 Result &= ScopeConstant.second;
1752
1753 Register Ptr = I.getOperand(1).getReg();
1754
1755
1756
1758 uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
1759 auto MemSemConstant = buildI32Constant(MemSem , I);
1760 Register MemSemReg = MemSemConstant.first;
1761 Result &= MemSemConstant.second;
1762
1763 Register ValueReg = I.getOperand(2).getReg();
1764 if (NegateOpcode != 0) {
1765
1767 Result &= selectOpWithSrcs(TmpReg, ResType, I, {ValueReg}, NegateOpcode);
1768 ValueReg = TmpReg;
1769 }
1770
1772 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
1780}
1781
1782bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
1783 unsigned ArgI = I.getNumOperands() - 1;
1785 I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
1788 if (!SrcType || SrcType->getOpcode() != SPIRV::OpTypeVector)
1790 "cannot select G_UNMERGE_VALUES with a non-vector argument");
1791
1794 MachineBasicBlock &BB = *I.getParent();
1795 bool Res = false;
1796 unsigned CurrentIndex = 0;
1797 for (unsigned i = 0; i < I.getNumDefs(); ++i) {
1798 Register ResVReg = I.getOperand(i).getReg();
1800 if (!ResType) {
1801 LLT ResLLT = MRI->getType(ResVReg);
1806 } else {
1807 ResType = ScalarType;
1808 }
1811 }
1812
1813 if (ResType->getOpcode() == SPIRV::OpTypeVector) {
1815 auto MIB =
1816 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
1822 for (unsigned j = 0; j < NumElements; ++j) {
1823 MIB.addImm(CurrentIndex + j);
1824 }
1825 CurrentIndex += NumElements;
1827 } else {
1828 auto MIB =
1829 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1833 .addImm(CurrentIndex);
1834 CurrentIndex++;
1836 }
1837 }
1838 return Res;
1839}
1840
1841bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
1843 uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
1844 auto MemSemConstant = buildI32Constant(MemSem, I);
1845 Register MemSemReg = MemSemConstant.first;
1846 bool Result = MemSemConstant.second;
1848 uint32_t Scope = static_cast<uint32_t>(
1850 auto ScopeConstant = buildI32Constant(Scope, I);
1851 Register ScopeReg = ScopeConstant.first;
1852 Result &= ScopeConstant.second;
1853 MachineBasicBlock &BB = *I.getParent();
1855 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
1859}
1860
1861bool SPIRVInstructionSelector::selectOverflowArith(Register ResVReg,
1863 MachineInstr &I,
1864 unsigned Opcode) const {
1865 Type *ResTy = nullptr;
1866 StringRef ResName;
1869 "Not enough info to select the arithmetic with overflow instruction");
1872 "with overflow instruction");
1873
1874
1877
1878 MachineIRBuilder MIRBuilder(I);
1880 ResTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
1881 assert(I.getNumDefs() > 1 && "Not enought operands");
1884 if (N > 1)
1887 Register ZeroReg = buildZerosVal(ResType, I);
1888
1890 MRI->setRegClass(StructVReg, &SPIRV::IDRegClass);
1891
1892 if (ResName.size() > 0)
1893 buildOpName(StructVReg, ResName, MIRBuilder);
1894
1895 MachineBasicBlock &BB = *I.getParent();
1896 auto MIB =
1897 BuildMI(BB, MIRBuilder.getInsertPt(), I.getDebugLoc(), TII.get(Opcode))
1900 for (unsigned i = I.getNumDefs(); i < I.getNumOperands(); ++i)
1901 MIB.addUse(I.getOperand(i).getReg());
1903
1904
1906 MRI->setRegClass(HigherVReg, &SPIRV::iIDRegClass);
1907 for (unsigned i = 0; i < I.getNumDefs(); ++i) {
1908 auto MIB =
1909 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1910 .addDef(i == 1 ? HigherVReg : I.getOperand(i).getReg())
1915 }
1916
1918 .addDef(I.getOperand(1).getReg())
1919 .addUse(BoolTypeReg)
1923}
1924
1925bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
1927 MachineInstr &I) const {
1932 Register Ptr = I.getOperand(2).getReg();
1934 assert(I.hasOneMemOperand());
1935 const MachineMemOperand *MemOp = *I.memoperands_begin();
1938 auto ScopeConstant = buildI32Constant(Scope, I);
1939 ScopeReg = ScopeConstant.first;
1940 Result &= ScopeConstant.second;
1941
1942 unsigned ScSem = static_cast<uint32_t>(
1945 unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
1946 auto MemSemEqConstant = buildI32Constant(MemSemEq, I);
1947 MemSemEqReg = MemSemEqConstant.first;
1948 Result &= MemSemEqConstant.second;
1950 unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
1951 if (MemSemEq == MemSemNeq)
1952 MemSemNeqReg = MemSemEqReg;
1953 else {
1954 auto MemSemNeqConstant = buildI32Constant(MemSemEq, I);
1955 MemSemNeqReg = MemSemNeqConstant.first;
1956 Result &= MemSemNeqConstant.second;
1957 }
1958 } else {
1959 ScopeReg = I.getOperand(5).getReg();
1960 MemSemEqReg = I.getOperand(6).getReg();
1961 MemSemNeqReg = I.getOperand(7).getReg();
1962 }
1963
1965 Register Val = I.getOperand(4).getReg();
1970 BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
1975 .addUse(MemSemEqReg)
1976 .addUse(MemSemNeqReg)
1997 BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
2004}
2005
2007 switch (SC) {
2008 case SPIRV::StorageClass::DeviceOnlyINTEL:
2009 case SPIRV::StorageClass::HostOnlyINTEL:
2010 return true;
2011 default:
2012 return false;
2013 }
2014}
2015
2016
2018 bool IsGRef = false;
2019 bool IsAllowedRefs =
2020 llvm::all_of(MRI->use_instructions(ResVReg), [&IsGRef](auto const &It) {
2021 unsigned Opcode = It.getOpcode();
2022 if (Opcode == SPIRV::OpConstantComposite ||
2023 Opcode == SPIRV::OpVariable ||
2024 isSpvIntrinsic(It, Intrinsic::spv_init_global))
2025 return IsGRef = true;
2026 return Opcode == SPIRV::OpName;
2027 });
2028 return IsAllowedRefs && IsGRef;
2029}
2030
2031Register SPIRVInstructionSelector::getUcharPtrTypeReg(
2032 MachineInstr &I, SPIRV::StorageClass::StorageClass SC) const {
2034 Type::getInt8Ty(I.getMF()->getFunction().getContext()), I, SC));
2035}
2036
2037MachineInstrBuilder
2038SPIRVInstructionSelector::buildSpecConstantOp(MachineInstr &I, Register Dest,
2040 uint32_t Opcode) const {
2041 return BuildMI(*I.getParent(), I, I.getDebugLoc(),
2042 TII.get(SPIRV::OpSpecConstantOp))
2047}
2048
2049MachineInstrBuilder
2050SPIRVInstructionSelector::buildConstGenericPtr(MachineInstr &I, Register SrcPtr,
2054 Register Tmp = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
2056 SPIRV::StorageClass::Generic),
2058 MachineFunction *MF = I.getParent()->getParent();
2060 MachineInstrBuilder MIB = buildSpecConstantOp(
2062 static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric));
2064 return MIB;
2065}
2066
2067
2068
2069
2070
2071
2072bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
2074 MachineInstr &I) const {
2075 MachineBasicBlock &BB = *I.getParent();
2077
2078 Register SrcPtr = I.getOperand(1).getReg();
2080
2081
2082 if (SrcPtrTy->getOpcode() != SPIRV::OpTypePointer ||
2083 ResType->getOpcode() != SPIRV::OpTypePointer)
2084 return BuildCOPY(ResVReg, SrcPtr, I);
2085
2088
2090
2091
2092
2093
2094 unsigned SpecOpcode =
2096 ? static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric)
2099 ? static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr)
2100 : 0);
2101
2102
2103
2104
2105 if (SpecOpcode) {
2106 return buildSpecConstantOp(I, ResVReg, SrcPtr,
2107 getUcharPtrTypeReg(I, DstSC), SpecOpcode)
2108 .constrainAllUses(TII, TRI, RBI);
2110 MachineInstrBuilder MIB = buildConstGenericPtr(I, SrcPtr, SrcPtrTy);
2112 buildSpecConstantOp(
2114 getUcharPtrTypeReg(I, DstSC),
2115 static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr))
2116 .constrainAllUses(TII, TRI, RBI);
2117 }
2118 }
2119
2120
2121 if (SrcSC == DstSC)
2122 return BuildCOPY(ResVReg, SrcPtr, I);
2123
2124 if ((SrcSC == SPIRV::StorageClass::Function &&
2125 DstSC == SPIRV::StorageClass::Private) ||
2126 (DstSC == SPIRV::StorageClass::Function &&
2127 SrcSC == SPIRV::StorageClass::Private))
2128 return BuildCOPY(ResVReg, SrcPtr, I);
2129
2130
2132 return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
2133
2135 return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
2136
2151 }
2152
2153
2154
2155 if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup)
2156 return selectUnOp(ResVReg, ResType, I,
2157 SPIRV::OpPtrCastToCrossWorkgroupINTEL);
2158 if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC))
2159 return selectUnOp(ResVReg, ResType, I,
2160 SPIRV::OpCrossWorkgroupCastToPtrINTEL);
2161 if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::Generic)
2162 return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
2163 if (SrcSC == SPIRV::StorageClass::Generic && isUSMStorageClass(DstSC))
2164 return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
2165
2166
2167 return false;
2168}
2169
2172 switch (Pred) {
2174 return SPIRV::OpFOrdEqual;
2176 return SPIRV::OpFOrdGreaterThanEqual;
2178 return SPIRV::OpFOrdGreaterThan;
2180 return SPIRV::OpFOrdLessThanEqual;
2182 return SPIRV::OpFOrdLessThan;
2184 return SPIRV::OpFOrdNotEqual;
2186 return SPIRV::OpOrdered;
2188 return SPIRV::OpFUnordEqual;
2190 return SPIRV::OpFUnordGreaterThanEqual;
2192 return SPIRV::OpFUnordGreaterThan;
2194 return SPIRV::OpFUnordLessThanEqual;
2196 return SPIRV::OpFUnordLessThan;
2198 return SPIRV::OpFUnordNotEqual;
2200 return SPIRV::OpUnordered;
2201 default:
2203 }
2204}
2205
2208 switch (Pred) {
2210 return SPIRV::OpIEqual;
2212 return SPIRV::OpINotEqual;
2214 return SPIRV::OpSGreaterThanEqual;
2216 return SPIRV::OpSGreaterThan;
2218 return SPIRV::OpSLessThanEqual;
2220 return SPIRV::OpSLessThan;
2222 return SPIRV::OpUGreaterThanEqual;
2224 return SPIRV::OpUGreaterThan;
2226 return SPIRV::OpULessThanEqual;
2228 return SPIRV::OpULessThan;
2229 default:
2231 }
2232}
2233
2237 return SPIRV::OpPtrEqual;
2239 return SPIRV::OpPtrNotEqual;
2240 default:
2241 llvm_unreachable("Unknown predicate type for pointer comparison");
2242 }
2243}
2244
2245
2248 switch (Pred) {
2250 return SPIRV::OpLogicalEqual;
2252 return SPIRV::OpLogicalNotEqual;
2253 default:
2254 llvm_unreachable("Unknown predicate type for Bool comparison");
2255 }
2256}
2257
2259 if (!LLVMFloatTy)
2264 default:
2269 }
2270}
2271
2273 if (!LLVMFloatTy)
2278 default:
2283 }
2284}
2285
2286bool SPIRVInstructionSelector::selectAnyOrAll(Register ResVReg,
2288 MachineInstr &I,
2289 unsigned OpAnyOrAll) const {
2290 assert(I.getNumOperands() == 3);
2291 assert(I.getOperand(2).isReg());
2292 MachineBasicBlock &BB = *I.getParent();
2293 Register InputRegister = I.getOperand(2).getReg();
2295
2296 if (!InputType)
2298
2300 bool IsVectorTy = InputType->getOpcode() == SPIRV::OpTypeVector;
2301 if (IsBoolTy && !IsVectorTy) {
2302 assert(ResVReg == I.getOperand(0).getReg());
2303 return BuildCOPY(ResVReg, InputRegister, I);
2304 }
2305
2307 unsigned SpirvNotEqualId =
2308 IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
2310 SPIRVType *SpvBoolTy = SpvBoolScalarTy;
2311 Register NotEqualReg = ResVReg;
2312
2313 if (IsVectorTy) {
2314 NotEqualReg =
2315 IsBoolTy ? InputRegister
2319 }
2320
2322 if (!IsBoolTy) {
2324 IsFloatTy ? buildZerosValF(InputType, I) : buildZerosVal(InputType, I);
2325
2327 .addDef(NotEqualReg)
2329 .addUse(InputRegister)
2330 .addUse(ConstZeroReg)
2332 }
2333
2334 if (!IsVectorTy)
2336
2340 .addUse(NotEqualReg)
2342}
2343
2344bool SPIRVInstructionSelector::selectAll(Register ResVReg,
2346 MachineInstr &I) const {
2347 return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAll);
2348}
2349
2350bool SPIRVInstructionSelector::selectAny(Register ResVReg,
2352 MachineInstr &I) const {
2353 return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAny);
2354}
2355
2356
2357bool SPIRVInstructionSelector::selectFloatDot(Register ResVReg,
2359 MachineInstr &I) const {
2360 assert(I.getNumOperands() == 4);
2361 assert(I.getOperand(2).isReg());
2362 assert(I.getOperand(3).isReg());
2363
2364 [[maybe_unused]] SPIRVType *VecType =
2366
2369 "dot product requires a vector of at least 2 components");
2370
2371 [[maybe_unused]] SPIRVType *EltType =
2373
2375
2376 MachineBasicBlock &BB = *I.getParent();
2377 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpDot))
2380 .addUse(I.getOperand(2).getReg())
2381 .addUse(I.getOperand(3).getReg())
2383}
2384
2385bool SPIRVInstructionSelector::selectIntegerDot(Register ResVReg,
2387 MachineInstr &I,
2388 bool Signed) const {
2389 assert(I.getNumOperands() == 4);
2390 assert(I.getOperand(2).isReg());
2391 assert(I.getOperand(3).isReg());
2392 MachineBasicBlock &BB = *I.getParent();
2393
2394 auto DotOp = Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
2398 .addUse(I.getOperand(2).getReg())
2399 .addUse(I.getOperand(3).getReg())
2401}
2402
2403
2404
2405bool SPIRVInstructionSelector::selectIntegerDotExpansion(
2406 Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
2407 assert(I.getNumOperands() == 4);
2408 assert(I.getOperand(2).isReg());
2409 assert(I.getOperand(3).isReg());
2410 MachineBasicBlock &BB = *I.getParent();
2411
2412
2413 Register Vec0 = I.getOperand(2).getReg();
2414 Register Vec1 = I.getOperand(3).getReg();
2417
2424
2427 "dot product requires a vector of at least 2 components");
2428
2436
2439
2441 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
2447
2449 ? MRI->createVirtualRegister(GR.getRegClass(ResType))
2450 : ResVReg;
2451
2458 Res = Sum;
2459 }
2460
2462}
2463
2464bool SPIRVInstructionSelector::selectOpIsInf(Register ResVReg,
2466 MachineInstr &I) const {
2467 MachineBasicBlock &BB = *I.getParent();
2468 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIsInf))
2471 .addUse(I.getOperand(2).getReg())
2473}
2474
2475bool SPIRVInstructionSelector::selectOpIsNan(Register ResVReg,
2477 MachineInstr &I) const {
2478 MachineBasicBlock &BB = *I.getParent();
2479 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIsNan))
2482 .addUse(I.getOperand(2).getReg())
2484}
2485
2486template
2487bool SPIRVInstructionSelector::selectDot4AddPacked(Register ResVReg,
2489 MachineInstr &I) const {
2490 assert(I.getNumOperands() == 5);
2491 assert(I.getOperand(2).isReg());
2492 assert(I.getOperand(3).isReg());
2493 assert(I.getOperand(4).isReg());
2494 MachineBasicBlock &BB = *I.getParent();
2495
2496 Register Acc = I.getOperand(2).getReg();
2497 Register X = I.getOperand(3).getReg();
2498 Register Y = I.getOperand(4).getReg();
2499
2500 auto DotOp = Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
2508
2515}
2516
2517
2518
2519
2520template
2521bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
2522 Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
2523 assert(I.getNumOperands() == 5);
2524 assert(I.getOperand(2).isReg());
2525 assert(I.getOperand(3).isReg());
2526 assert(I.getOperand(4).isReg());
2527 MachineBasicBlock &BB = *I.getParent();
2528
2530
2531 Register Acc = I.getOperand(2).getReg();
2532 Register X = I.getOperand(3).getReg();
2533 Register Y = I.getOperand(4).getReg();
2534
2536 auto ExtractOp =
2537 Signed ? SPIRV::OpBitFieldSExtract : SPIRV::OpBitFieldUExtract;
2538
2539 bool ZeroAsNull = !STI.isShader();
2540
2541 for (unsigned i = 0; i < 4; i++) {
2542
2543 Register AElt = MRI->createVirtualRegister(&SPIRV::IDRegClass);
2552
2553
2554 Register BElt = MRI->createVirtualRegister(&SPIRV::IDRegClass);
2563
2564
2565 Register Mul = MRI->createVirtualRegister(&SPIRV::IDRegClass);
2572
2573
2574 Register MaskMul = MRI->createVirtualRegister(&SPIRV::IDRegClass);
2583
2584
2586 i < 3 ? MRI->createVirtualRegister(&SPIRV::IDRegClass) : ResVReg;
2593
2594 Acc = Sum;
2595 }
2596
2598}
2599
2600
2601
2602bool SPIRVInstructionSelector::selectSaturate(Register ResVReg,
2604 MachineInstr &I) const {
2605 assert(I.getNumOperands() == 3);
2606 assert(I.getOperand(2).isReg());
2607 MachineBasicBlock &BB = *I.getParent();
2608 Register VZero = buildZerosValF(ResType, I);
2609 Register VOne = buildOnesValF(ResType, I);
2610
2611 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
2614 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
2616 .addUse(I.getOperand(2).getReg())
2620}
2621
2622bool SPIRVInstructionSelector::selectSign(Register ResVReg,
2624 MachineInstr &I) const {
2625 assert(I.getNumOperands() == 3);
2626 assert(I.getOperand(2).isReg());
2627 MachineBasicBlock &BB = *I.getParent();
2628 Register InputRegister = I.getOperand(2).getReg();
2630 auto &DL = I.getDebugLoc();
2631
2632 if (!InputType)
2634
2636
2639
2640 bool NeedsConversion = IsFloatTy || SignBitWidth != ResBitWidth;
2641
2642 auto SignOpcode = IsFloatTy ? GL::FSign : GL::SSign;
2643 Register SignReg = NeedsConversion
2644 ? MRI->createVirtualRegister(&SPIRV::IDRegClass)
2645 : ResVReg;
2646
2651 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
2653 .addUse(InputRegister)
2655
2656 if (NeedsConversion) {
2657 auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert;
2663 }
2664
2666}
2667
2668bool SPIRVInstructionSelector::selectWaveOpInst(Register ResVReg,
2670 MachineInstr &I,
2671 unsigned Opcode) const {
2672 MachineBasicBlock &BB = *I.getParent();
2674
2675 auto BMI = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
2680
2681 for (unsigned J = 2; J < I.getNumOperands(); J++) {
2682 BMI.addUse(I.getOperand(J).getReg());
2683 }
2684
2686}
2687
2688bool SPIRVInstructionSelector::selectWaveActiveCountBits(
2689 Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
2690
2694 bool Result = selectWaveOpInst(BallotReg, BallotType, I,
2695 SPIRV::OpGroupNonUniformBallot);
2696
2697 MachineBasicBlock &BB = *I.getParent();
2699 TII.get(SPIRV::OpGroupNonUniformBallotBitCount))
2704 .addImm(SPIRV::GroupOperation::Reduce)
2707
2709}
2710
2711bool SPIRVInstructionSelector::selectWaveReduceMax(Register ResVReg,
2713 MachineInstr &I,
2714 bool IsUnsigned) const {
2715 assert(I.getNumOperands() == 3);
2716 assert(I.getOperand(2).isReg());
2717 MachineBasicBlock &BB = *I.getParent();
2718 Register InputRegister = I.getOperand(2).getReg();
2720
2721 if (!InputType)
2723
2725
2727 auto IntegerOpcodeType =
2728 IsUnsigned ? SPIRV::OpGroupNonUniformUMax : SPIRV::OpGroupNonUniformSMax;
2729 auto Opcode = IsFloatTy ? SPIRV::OpGroupNonUniformFMax : IntegerOpcodeType;
2735 .addImm(SPIRV::GroupOperation::Reduce)
2736 .addUse(I.getOperand(2).getReg())
2738}
2739
2740bool SPIRVInstructionSelector::selectWaveReduceMin(Register ResVReg,
2742 MachineInstr &I,
2743 bool IsUnsigned) const {
2744 assert(I.getNumOperands() == 3);
2745 assert(I.getOperand(2).isReg());
2746 MachineBasicBlock &BB = *I.getParent();
2747 Register InputRegister = I.getOperand(2).getReg();
2749
2750 if (!InputType)
2752
2754
2756 auto IntegerOpcodeType =
2757 IsUnsigned ? SPIRV::OpGroupNonUniformUMin : SPIRV::OpGroupNonUniformSMin;
2758 auto Opcode = IsFloatTy ? SPIRV::OpGroupNonUniformFMin : IntegerOpcodeType;
2764 .addImm(SPIRV::GroupOperation::Reduce)
2765 .addUse(I.getOperand(2).getReg())
2767}
2768
2769bool SPIRVInstructionSelector::selectWaveReduceSum(Register ResVReg,
2771 MachineInstr &I) const {
2772 assert(I.getNumOperands() == 3);
2773 assert(I.getOperand(2).isReg());
2774 MachineBasicBlock &BB = *I.getParent();
2775 Register InputRegister = I.getOperand(2).getReg();
2777
2778 if (!InputType)
2780
2782
2784 auto Opcode =
2785 IsFloatTy ? SPIRV::OpGroupNonUniformFAdd : SPIRV::OpGroupNonUniformIAdd;
2791 .addImm(SPIRV::GroupOperation::Reduce)
2792 .addUse(I.getOperand(2).getReg());
2793}
2794
2795bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
2797 MachineInstr &I) const {
2798 MachineBasicBlock &BB = *I.getParent();
2799 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
2802 .addUse(I.getOperand(1).getReg())
2804}
2805
// Selects G_FREEZE. If the source traces (possibly through a COPY) to an
// undefined value (ASSIGN_TYPE of G_IMPLICIT_DEF, or OpUndef), the result is
// materialized as OpConstantNull; otherwise the source is simply copied.
// NOTE(review): lines are missing and some tokens are corrupted in this
// extraction (e.g. the `if (.getOperand(0)...` condition has lost its `!I`);
// confirm against upstream llvm-project before editing.
2806bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
2808 MachineInstr &I) const {
2809
2810
2811
2812
2813
2814 if (.getOperand(0).isReg() ||
.getOperand(1).isReg())
2815 return false;
2816 Register OpReg = I.getOperand(1).getReg();
2817 if (MachineInstr *Def = MRI->getVRegDef(OpReg)) {
2818 if (Def->getOpcode() == TargetOpcode::COPY)
2819 Def = MRI->getVRegDef(Def->getOperand(1).getReg());
2821 switch (Def->getOpcode()) {
2822 case SPIRV::ASSIGN_TYPE:
2823 if (MachineInstr *AssignToDef =
2824 MRI->getVRegDef(Def->getOperand(1).getReg())) {
2825 if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
2826 Reg = Def->getOperand(2).getReg();
2827 }
2828 break;
2829 case SPIRV::OpUndef:
2830 Reg = Def->getOperand(1).getReg();
2831 break;
2832 }
2833 unsigned DestOpCode;
2835 DestOpCode = SPIRV::OpConstantNull;
2836 } else {
2837 DestOpCode = TargetOpcode::COPY;
2838 Reg = OpReg;
2839 }
2840 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
2841 .addDef(I.getOperand(0).getReg())
2844 }
2845 return false;
2846}
2847
// Selects G_BUILD_VECTOR into OpConstantComposite (when every constituent is
// constant) or OpCompositeConstruct. The result type must be a vector or
// array whose element count N matches the number of source operands.
// NOTE(review): lines are missing from this extraction (embedded numbering is
// non-contiguous); confirm against upstream llvm-project before editing.
2848bool SPIRVInstructionSelector::selectBuildVector(Register ResVReg,
2850 MachineInstr &I) const {
2851 unsigned N = 0;
2852 if (ResType->getOpcode() == SPIRV::OpTypeVector)
2854 else if (ResType->getOpcode() == SPIRV::OpTypeArray)
2856 else
2857 report_fatal_error("Cannot select G_BUILD_VECTOR with a non-vector result");
2858 if (I.getNumExplicitOperands() - I.getNumExplicitDefs() != N)
2859 report_fatal_error("G_BUILD_VECTOR and the result type are inconsistent");
2860
2861
2863 for (unsigned i = I.getNumExplicitDefs();
2864 i < I.getNumExplicitOperands() && IsConst; ++i)
2867
2868 if (!IsConst && N < 2)
2870 "There must be at least two constituent operands in a vector");
2871
2873 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
2874 TII.get(IsConst ? SPIRV::OpConstantComposite
2875 : SPIRV::OpCompositeConstruct))
2878 for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
2879 MIB.addUse(I.getOperand(i).getReg());
2881}
2882
// Selects G_SPLAT_VECTOR: replicates the single scalar operand N times into
// an OpConstantComposite (constant operand) or OpCompositeConstruct.
// NOTE(review): lines are missing and the `if (.getOperand(OpIdx)...` test
// has lost tokens in this extraction; confirm against upstream llvm-project.
2883bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
2885 MachineInstr &I) const {
2886 unsigned N = 0;
2887 if (ResType->getOpcode() == SPIRV::OpTypeVector)
2889 else if (ResType->getOpcode() == SPIRV::OpTypeArray)
2891 else
2892 report_fatal_error("Cannot select G_SPLAT_VECTOR with a non-vector result");
2893
2895 if (.getOperand(OpIdx).isReg())
2897
2898
2901
2902 if (!IsConst && N < 2)
2904 "There must be at least two constituent operands in a vector");
2905
2907 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
2908 TII.get(IsConst ? SPIRV::OpConstantComposite
2909 : SPIRV::OpCompositeConstruct))
2912 for (unsigned i = 0; i < N; ++i)
2915}
2916
// Selects a discard: uses OpDemoteToHelperInvocation when the demote
// extension/capability is available, otherwise OpKill. Because OpKill is a
// terminator, the instruction that follows it (if any) is removed.
// NOTE(review): lines are missing from this extraction (embedded numbering is
// non-contiguous); confirm against upstream llvm-project before editing.
2917bool SPIRVInstructionSelector::selectDiscard(Register ResVReg,
2919 MachineInstr &I) const {
2920
2921 unsigned Opcode;
2922
2924 SPIRV::Extension::SPV_EXT_demote_to_helper_invocation) ||
2926 Opcode = SPIRV::OpDemoteToHelperInvocation;
2927 } else {
2928 Opcode = SPIRV::OpKill;
2929
2930 if (MachineInstr *NextI = I.getNextNode()) {
2932 NextI->removeFromParent();
2933 }
2934 }
2935
2936 MachineBasicBlock &BB = *I.getParent();
2939}
2940
// Emits a two-operand comparison with the caller-chosen SPIR-V opcode,
// comparing operands 2 and 3 (asserted to have the same type).
// NOTE(review): lines are missing from this extraction; confirm against
// upstream llvm-project before editing.
2941bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
2943 unsigned CmpOpc,
2944 MachineInstr &I) const {
2945 Register Cmp0 = I.getOperand(2).getReg();
2946 Register Cmp1 = I.getOperand(3).getReg();
2949 "CMP operands should have the same type");
2950 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
2957}
2958
// Selects G_ICMP: picks a comparison opcode from the predicate (pointer
// operands get special handling) and delegates to selectCmp.
// NOTE(review): lines are missing from this extraction; confirm against
// upstream llvm-project before editing.
2959bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
2961 MachineInstr &I) const {
2962 auto Pred = I.getOperand(1).getPredicate();
2963 unsigned CmpOpc;
2964
2965 Register CmpOperand = I.getOperand(2).getReg();
2966 if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
2970 else
2972 return selectCmp(ResVReg, ResType, CmpOpc, I);
2973}
2974
// Materializes a 32-bit integer constant, returning the register and a
// success flag. Zero is emitted as OpConstantNull, everything else as
// OpConstantI; an existing matching constant register is reused when valid.
// NOTE(review): lines are missing from this extraction (embedded numbering is
// non-contiguous); confirm against upstream llvm-project before editing.
2975std::pair<Register, bool>
2976SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
2977 const SPIRVType *ResType) const {
2981
2982 auto ConstInt = ConstantInt::get(LLVMTy, Val);
2985 if (!NewReg.isValid()) {
2986 NewReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
2987 MachineBasicBlock &BB = *I.getParent();
2988 MachineInstr *MI =
2989 Val == 0
2990 ? BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
2993 : BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
2996 .addImm(APInt(32, Val).getZExtValue());
2999 }
3000 return {NewReg, Result};
3001}
3002
// Selects G_FCMP: maps the FP predicate to a SPIR-V comparison opcode and
// delegates to selectCmp.
// NOTE(review): a parameter line is missing from this extraction; confirm
// against upstream llvm-project before editing.
3003bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
3005 MachineInstr &I) const {
3006 unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
3007 return selectCmp(ResVReg, ResType, CmpOp, I);
3008}
3009
// Builds an all-zeros integer constant of the given type (scalar or vector).
// Outside shaders (!STI.isShader()) zero may be emitted as OpConstantNull.
// NOTE(review): lines are missing from this extraction; confirm against
// upstream llvm-project before editing.
3010Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
3011 MachineInstr &I) const {
3012
3013 bool ZeroAsNull = !STI.isShader();
3014 if (ResType->getOpcode() == SPIRV::OpTypeVector)
3017}
3018
// Builds an all-zeros floating-point constant of the given type (scalar or
// vector); outside shaders zero may be emitted as OpConstantNull.
// NOTE(review): lines are missing from this extraction; confirm against
// upstream llvm-project before editing.
3019Register SPIRVInstructionSelector::buildZerosValF(const SPIRVType *ResType,
3020 MachineInstr &I) const {
3021
3022 bool ZeroAsNull = !STI.isShader();
3024 if (ResType->getOpcode() == SPIRV::OpTypeVector)
3027}
3028
// Builds a floating-point constant of value one for the given type (scalar
// or vector splat).
// NOTE(review): lines are missing from this extraction; confirm against
// upstream llvm-project before editing.
3029Register SPIRVInstructionSelector::buildOnesValF(const SPIRVType *ResType,
3030 MachineInstr &I) const {
3031
3032 bool ZeroAsNull = !STI.isShader();
3034 if (ResType->getOpcode() == SPIRV::OpTypeVector)
3037}
3038
// Builds an integer constant of one, or all-ones (i.e. -1) when AllOnes is
// set, as a scalar or vector splat of the given type.
// NOTE(review): lines are missing from this extraction; confirm against
// upstream llvm-project before editing.
3039Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
3041 MachineInstr &I) const {
3043 APInt One =
3045 if (ResType->getOpcode() == SPIRV::OpTypeVector)
3048}
3049
// Selects G_SELECT into one of the OpSelect pseudo variants. The variant is
// chosen on three axes: vector vs scalar result, float/pointer/integer
// element type, and scalar vs vector boolean condition (operand 1).
// NOTE(review): the scalar branch falls back to the V*VCond variants when the
// condition is not a scalar bool — matches the visible table, but lines are
// missing from this extraction; confirm against upstream llvm-project.
3050bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
3052 MachineInstr &I) const {
3053 Register SelectFirstArg = I.getOperand(2).getReg();
3054 Register SelectSecondArg = I.getOperand(3).getReg();
3057
3058 bool IsFloatTy =
3060 bool IsPtrTy =
3063 SPIRV::OpTypeVector;
3064
3065 bool IsScalarBool =
3066 GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
3067 unsigned Opcode;
3068 if (IsVectorTy) {
3069 if (IsFloatTy) {
3070 Opcode = IsScalarBool ? SPIRV::OpSelectVFSCond : SPIRV::OpSelectVFVCond;
3071 } else if (IsPtrTy) {
3072 Opcode = IsScalarBool ? SPIRV::OpSelectVPSCond : SPIRV::OpSelectVPVCond;
3073 } else {
3074 Opcode = IsScalarBool ? SPIRV::OpSelectVISCond : SPIRV::OpSelectVIVCond;
3075 }
3076 } else {
3077 if (IsFloatTy) {
3078 Opcode = IsScalarBool ? SPIRV::OpSelectSFSCond : SPIRV::OpSelectVFVCond;
3079 } else if (IsPtrTy) {
3080 Opcode = IsScalarBool ? SPIRV::OpSelectSPSCond : SPIRV::OpSelectVPVCond;
3081 } else {
3082 Opcode = IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
3083 }
3084 }
3085 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
3088 .addUse(I.getOperand(1).getReg())
3089 .addUse(SelectFirstArg)
3090 .addUse(SelectSecondArg)
3092}
3093
// Selects a boolean-to-integer select with default arms: condition chooses
// between all-ones/one (sign-dependent) and zero of the result type.
// NOTE(review): lines are missing from this extraction; confirm against
// upstream llvm-project before editing.
3094bool SPIRVInstructionSelector::selectSelectDefaultArgs(Register ResVReg,
3096 MachineInstr &I,
3097 bool IsSigned) const {
3098
3099 Register ZeroReg = buildZerosVal(ResType, I);
3100 Register OneReg = buildOnesVal(IsSigned, ResType, I);
3101 bool IsScalarBool =
3102 GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
3103 unsigned Opcode =
3104 IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
3105 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
3108 .addUse(I.getOperand(1).getReg())
3112}
3113
// Selects an int-to-float conversion. A boolean source is first widened to
// an integer (via selectSelectDefaultArgs) before the actual conversion
// opcode supplied by the caller is applied.
// NOTE(review): lines are missing from this extraction (embedded numbering is
// non-contiguous); confirm against upstream llvm-project before editing.
3114bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
3116 MachineInstr &I, bool IsSigned,
3117 unsigned Opcode) const {
3118 Register SrcReg = I.getOperand(1).getReg();
3119
3120
3124 if (ResType->getOpcode() == SPIRV::OpTypeVector) {
3127 }
3129 selectSelectDefaultArgs(SrcReg, TmpType, I, false);
3130 }
3131 return selectOpWithSrcs(ResVReg, ResType, I, {SrcReg}, Opcode);
3132}
3133
// Selects G_SEXT/G_ZEXT: boolean sources become a 0/1 (or 0/-1) select,
// identical types become a plain COPY, otherwise OpSConvert/OpUConvert.
// NOTE(review): lines are missing from this extraction; confirm against
// upstream llvm-project before editing.
3134bool SPIRVInstructionSelector::selectExt(Register ResVReg,
3136 MachineInstr &I, bool IsSigned) const {
3137 Register SrcReg = I.getOperand(1).getReg();
3139 return selectSelectDefaultArgs(ResVReg, ResType, I, IsSigned);
3140
3142 if (SrcType == ResType)
3143 return BuildCOPY(ResVReg, SrcReg, I);
3144
3145 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
3146 return selectUnOp(ResVReg, ResType, I, Opcode);
3147}
3148
// Selects a three-way signed/unsigned compare (ucmp/scmp-style): computes
// lhs<=rhs and lhs<rhs with the signedness-matched comparison opcodes, then
// uses two selects to produce -1 / 0 / +1 (vector variants when N > 1).
// NOTE(review): many intermediate lines are missing from this extraction;
// confirm against upstream llvm-project before editing.
3149bool SPIRVInstructionSelector::selectSUCmp(Register ResVReg,
3151 MachineInstr &I,
3152 bool IsSigned) const {
3153 MachineIRBuilder MIRBuilder(I);
3154 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
3155 MachineBasicBlock &BB = *I.getParent();
3156
3159 if (N > 1)
3162
3163
3164
3169 TII.get(IsSigned ? SPIRV::OpSLessThanEqual
3170 : SPIRV::OpULessThanEqual))
3171 .addDef(IsLessEqReg)
3172 .addUse(BoolTypeReg)
3173 .addUse(I.getOperand(1).getReg())
3174 .addUse(I.getOperand(2).getReg())
3180 TII.get(IsSigned ? SPIRV::OpSLessThan : SPIRV::OpULessThan))
3182 .addUse(BoolTypeReg)
3183 .addUse(I.getOperand(1).getReg())
3184 .addUse(I.getOperand(2).getReg())
3186
3189 MRI->createVirtualRegister(GR.getRegClass(ResType));
3192 unsigned SelectOpcode =
3193 N > 1 ? SPIRV::OpSelectVIVCond : SPIRV::OpSelectSISCond;
3195 .addDef(NegOneOrZeroReg)
3198 .addUse(buildOnesVal(true, ResType, I))
3199 .addUse(buildZerosVal(ResType, I))
3204 .addUse(IsLessEqReg)
3205 .addUse(NegOneOrZeroReg)
3206 .addUse(buildOnesVal(false, ResType, I))
3208}
3209
// Converts an integer to a bool by masking with one (OpBitwiseAndS/V) and,
// presumably, comparing the result — TODO confirm: the tail of the function
// is missing from this extraction.
3210bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
3212 MachineInstr &I,
3214 const SPIRVType *BoolTy) const {
3215
3217 bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
3218 unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
3220 Register One = buildOnesVal(false, IntTy, I);
3221 MachineBasicBlock &BB = *I.getParent();
3234}
3235
// Selects G_TRUNC: truncation to bool goes through selectIntToBool, same
// types become a COPY, otherwise OpSConvert/OpUConvert.
// NOTE(review): lines are missing from this extraction; confirm against
// upstream llvm-project before editing.
3236bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
3238 MachineInstr &I) const {
3239 Register IntReg = I.getOperand(1).getReg();
3242 return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
3243 if (ArgType == ResType)
3244 return BuildCOPY(ResVReg, IntReg, I);
3246 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
3247 return selectUnOp(ResVReg, ResType, I, Opcode);
3248}
3249
// Selects G_CONSTANT/G_FCONSTANT. Null pointer/event constants are emitted
// at the function entry block; other constants are materialized via the
// registry, with a COPY only if the chosen register differs from ResVReg.
// NOTE(review): lines are missing from this extraction (embedded numbering is
// non-contiguous); confirm against upstream llvm-project before editing.
3250bool SPIRVInstructionSelector::selectConst(Register ResVReg,
3252 MachineInstr &I) const {
3253 unsigned Opcode = I.getOpcode();
3254 unsigned TpOpcode = ResType->getOpcode();
3256 if (TpOpcode == SPIRV::OpTypePointer || TpOpcode == SPIRV::OpTypeEvent) {
3257 assert(Opcode == TargetOpcode::G_CONSTANT &&
3258 I.getOperand(1).getCImm()->isZero());
3259 MachineBasicBlock &DepMBB = I.getMF()->front();
3260 MachineIRBuilder MIRBuilder(DepMBB, DepMBB.getFirstNonPHI());
3262 } else if (Opcode == TargetOpcode::G_FCONSTANT) {
3265 } else {
3268 }
3269 return Reg == ResVReg ? true : BuildCOPY(ResVReg, Reg, I);
3270}
3271
// Selects an undef value as a single OpUndef of the result type.
// NOTE(review): lines are missing from this extraction; confirm against
// upstream llvm-project before editing.
3272bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
3274 MachineInstr &I) const {
3275 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
3279}
3280
// Selects a composite insert: OpCompositeInsert with the object (operand 3),
// the composite (operand 2), and the remaining operands as literal indices.
// NOTE(review): lines are missing from this extraction; confirm against
// upstream llvm-project before editing.
3281bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
3283 MachineInstr &I) const {
3284 MachineBasicBlock &BB = *I.getParent();
3285 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
3288
3289 .addUse(I.getOperand(3).getReg())
3290
3291 .addUse(I.getOperand(2).getReg());
3292 for (unsigned i = 4; i < I.getNumOperands(); i++)
3295}
3296
// Selects a composite extract: OpCompositeExtract over operand 2 with the
// trailing operands as literal indices.
// NOTE(review): lines are missing from this extraction; confirm against
// upstream llvm-project before editing.
3297bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
3299 MachineInstr &I) const {
3300 MachineBasicBlock &BB = *I.getParent();
3301 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
3304 .addUse(I.getOperand(2).getReg());
3305 for (unsigned i = 3; i < I.getNumOperands(); i++)
3308}
3309
// Selects an element insert: constant index degrades to selectInsertVal,
// otherwise OpVectorInsertDynamic with vector, value, and index operands.
// NOTE(review): lines are missing from this extraction; confirm against
// upstream llvm-project before editing.
3310bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
3312 MachineInstr &I) const {
3314 return selectInsertVal(ResVReg, ResType, I);
3315 MachineBasicBlock &BB = *I.getParent();
3316 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
3319 .addUse(I.getOperand(2).getReg())
3320 .addUse(I.getOperand(3).getReg())
3321 .addUse(I.getOperand(4).getReg())
3323}
3324
// Selects an element extract: constant index degrades to selectExtractVal,
// otherwise OpVectorExtractDynamic with vector and index operands.
// NOTE(review): lines are missing from this extraction; confirm against
// upstream llvm-project before editing.
3325bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
3327 MachineInstr &I) const {
3329 return selectExtractVal(ResVReg, ResType, I);
3330 MachineBasicBlock &BB = *I.getParent();
3331 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
3334 .addUse(I.getOperand(2).getReg())
3335 .addUse(I.getOperand(3).getReg())
3337}
3338
// Selects a GEP (spv_gep) into OpAccessChain / OpPtrAccessChain (with
// InBounds variants when operand 2's inbounds flag is set). For the plain
// AccessChain forms the leading zero index is skipped (StartingIndex 5 vs 4).
// NOTE(review): lines are missing from this extraction (embedded numbering is
// non-contiguous); confirm against upstream llvm-project before editing.
3339bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
3341 MachineInstr &I) const {
3342 const bool IsGEPInBounds = I.getOperand(2).getImm();
3343
3344
3345
3346
3348 ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
3349 : SPIRV::OpAccessChain)
3350 : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
3351 : SPIRV::OpPtrAccessChain);
3352
3353 auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
3356
3357 .addUse(I.getOperand(3).getReg());
3359 (Opcode == SPIRV::OpPtrAccessChain ||
3360 Opcode == SPIRV::OpInBoundsPtrAccessChain ||
3362 "Cannot translate GEP to OpAccessChain. First index must be 0.");
3363
3364
3365 const unsigned StartingIndex =
3366 (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
3367 ? 5
3368 : 4;
3369 for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
3370 Res.addUse(I.getOperand(i).getReg());
3371 return Res.constrainAllUses(TII, TRI, RBI);
3372}
3373
3374
// Wraps non-constant constituent operands of a constant-composite into
// OpSpecConstantOp Bitcast instructions so they can legally appear inside a
// constant. Operands that are already constants (or come from address-space
// casts / inttoptr, per the visible skip list) pass through unchanged; the
// wrapped registers are collected into CompositeArgs.
// NOTE(review): many intermediate lines are missing from this extraction;
// confirm against upstream llvm-project before editing.
3375bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
3378 unsigned Lim = I.getNumExplicitOperands();
3379 for (unsigned i = I.getNumExplicitDefs() + 1; i < Lim; ++i) {
3380 Register OpReg = I.getOperand(i).getReg();
3381 MachineInstr *OpDefine = MRI->getVRegDef(OpReg);
3383 SmallPtrSet<SPIRVType *, 4> Visited;
3384 if (!OpDefine || !OpType || isConstReg(MRI, OpDefine, Visited) ||
3385 OpDefine->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
3386 OpDefine->getOpcode() == TargetOpcode::G_INTTOPTR ||
3388
3389
3391 continue;
3392 }
3393 MachineFunction *MF = I.getMF();
3395 if (WrapReg.isValid()) {
3396 CompositeArgs.push_back(WrapReg);
3397 continue;
3398 }
3399
3400 WrapReg = MRI->createVirtualRegister(GR.getRegClass(OpType));
3401 CompositeArgs.push_back(WrapReg);
3402
3405 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
3406 TII.get(SPIRV::OpSpecConstantOp))
3409 .addImm(static_cast<uint32_t>(SPIRV::Opcode::Bitcast))
3411 GR.add(OpDefine, MIB);
3413 if (!Result)
3414 break;
3415 }
3417}
3418
// Selects a derivative intrinsic (DPdx/DPdy/Fwidth family) with the opcode
// chosen by the caller. Shader-only (errorIfInstrOutsideShader). When the
// source type is directly usable the opcode is emitted as-is; otherwise the
// value is round-tripped through OpFConvert before and after the derivative
// (presumably a float32 widening — TODO confirm, conversion target lines are
// missing from this extraction).
3419bool SPIRVInstructionSelector::selectDerivativeInst(
3421 const unsigned DPdOpCode) const {
3422
3423
3424 errorIfInstrOutsideShader(I);
3425
3426
3427
3428
3429 Register SrcReg = I.getOperand(2).getReg();
3434 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DPdOpCode))
3437 .addUse(I.getOperand(2).getReg());
3438
3439 MachineIRBuilder MIRBuilder(I);
3442 if (componentCount != 1)
3444 MIRBuilder, false);
3445
3446 const TargetRegisterClass *RegClass = GR.getRegClass(SrcType);
3447 Register ConvertToVReg = MRI->createVirtualRegister(RegClass);
3448 Register DpdOpVReg = MRI->createVirtualRegister(RegClass);
3449
3451 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpFConvert))
3452 .addDef(ConvertToVReg)
3459 .addUse(ConvertToVReg)
3462 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpFConvert))
3468}
3469
// Central dispatch for target intrinsics (G_INTRINSIC*): switches on the
// intrinsic ID and forwards to the dedicated select* helper, emits the
// SPIR-V instruction inline for simple cases (OpName, OpSwitch, OpLoopMerge,
// OpSelectionMerge, barriers, lifetime markers, ...), or maps HLSL/builtin
// input queries onto loadVec3BuiltinInputID/loadBuiltinInputID. Unknown
// intrinsics produce a fatal "Intrinsic selection not implemented" error;
// a few profiling intrinsics are deliberately ignored (break -> true).
// NOTE(review): many intermediate lines are missing from this extraction
// (embedded numbering is non-contiguous); confirm against upstream
// llvm-project before editing.
3470bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
3472 MachineInstr &I) const {
3473 MachineBasicBlock &BB = *I.getParent();
3475 switch (IID) {
3476 case Intrinsic::spv_load:
3477 return selectLoad(ResVReg, ResType, I);
3478 case Intrinsic::spv_store:
3479 return selectStore(I);
3480 case Intrinsic::spv_extractv:
3481 return selectExtractVal(ResVReg, ResType, I);
3482 case Intrinsic::spv_insertv:
3483 return selectInsertVal(ResVReg, ResType, I);
3484 case Intrinsic::spv_extractelt:
3485 return selectExtractElt(ResVReg, ResType, I);
3486 case Intrinsic::spv_insertelt:
3487 return selectInsertElt(ResVReg, ResType, I);
3488 case Intrinsic::spv_gep:
3489 return selectGEP(ResVReg, ResType, I);
3490 case Intrinsic::spv_bitcast: {
3491 Register OpReg = I.getOperand(2).getReg();
3495 report_fatal_error("incompatible result and operand types in a bitcast");
3496 return selectOpWithSrcs(ResVReg, ResType, I, {OpReg}, SPIRV::OpBitcast);
3497 }
3498 case Intrinsic::spv_unref_global:
3499 case Intrinsic::spv_init_global: {
3500 MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
3501 MachineInstr *Init = I.getNumExplicitOperands() > 2
3502 ? MRI->getVRegDef(I.getOperand(2).getReg())
3503 : nullptr;
3505 Register GVarVReg = MI->getOperand(0).getReg();
3506 bool Res = selectGlobalValue(GVarVReg, *MI, Init);
3507
3508
3509
3510 if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
3512 MI->removeFromParent();
3513 }
3514 return Res;
3515 }
3516 case Intrinsic::spv_undef: {
3517 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
3521 }
3522 case Intrinsic::spv_const_composite: {
3523
3524 bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
3527
3528
3529 if (!IsNull) {
3530 if (!wrapIntoSpecConstantOp(I, CompositeArgs))
3531 return false;
3532 MachineIRBuilder MIR(I);
3534 MIR, SPIRV::OpConstantComposite, 3,
3535 SPIRV::OpConstantCompositeContinuedINTEL, CompositeArgs, ResVReg,
3537 for (auto *Instr : Instructions) {
3538 Instr->setDebugLoc(I.getDebugLoc());
3540 return false;
3541 }
3542 return true;
3543 } else {
3544 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
3548 }
3549 }
3550 case Intrinsic::spv_assign_name: {
3551 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
3552 MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
3553 for (unsigned i = I.getNumExplicitDefs() + 2;
3554 i < I.getNumExplicitOperands(); ++i) {
3555 MIB.addImm(I.getOperand(i).getImm());
3556 }
3558 }
3559 case Intrinsic::spv_switch: {
3560 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
3561 for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
3562 if (I.getOperand(i).isReg())
3563 MIB.addReg(I.getOperand(i).getReg());
3564 else if (I.getOperand(i).isCImm())
3565 addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
3566 else if (I.getOperand(i).isMBB())
3567 MIB.addMBB(I.getOperand(i).getMBB());
3568 else
3570 }
3572 }
3573 case Intrinsic::spv_loop_merge: {
3574 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpLoopMerge));
3575 for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
3576 if (I.getOperand(i).isMBB())
3577 MIB.addMBB(I.getOperand(i).getMBB());
3578 else
3580 }
3582 }
3583 case Intrinsic::spv_selection_merge: {
3584 auto MIB =
3585 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSelectionMerge));
3586 assert(I.getOperand(1).isMBB() &&
3587 "operand 1 to spv_selection_merge must be a basic block");
3588 MIB.addMBB(I.getOperand(1).getMBB());
3589 MIB.addImm(getSelectionOperandForImm(I.getOperand(2).getImm()));
3591 }
3592 case Intrinsic::spv_cmpxchg:
3593 return selectAtomicCmpXchg(ResVReg, ResType, I);
3594 case Intrinsic::spv_unreachable:
3595 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable))
3597 case Intrinsic::spv_alloca:
3598 return selectFrameIndex(ResVReg, ResType, I);
3599 case Intrinsic::spv_alloca_array:
3600 return selectAllocaArray(ResVReg, ResType, I);
3601 case Intrinsic::spv_assume:
3602 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
3603 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
3604 .addUse(I.getOperand(1).getReg())
3606 break;
3607 case Intrinsic::spv_expect:
3608 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
3609 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
3612 .addUse(I.getOperand(2).getReg())
3613 .addUse(I.getOperand(3).getReg())
3615 break;
3616 case Intrinsic::arithmetic_fence:
3617 if (STI.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence))
3618 return BuildMI(BB, I, I.getDebugLoc(),
3619 TII.get(SPIRV::OpArithmeticFenceEXT))
3622 .addUse(I.getOperand(2).getReg())
3624 else
3625 return BuildCOPY(ResVReg, I.getOperand(2).getReg(), I);
3626 break;
3627 case Intrinsic::spv_thread_id:
3628
3629
3630
3631
3632
3633 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalInvocationId, ResVReg,
3634 ResType, I);
3635 case Intrinsic::spv_thread_id_in_group:
3636
3637
3638
3639
3640
3641 return loadVec3BuiltinInputID(SPIRV::BuiltIn::LocalInvocationId, ResVReg,
3642 ResType, I);
3643 case Intrinsic::spv_group_id:
3644
3645
3646
3647
3648
3649 return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupId, ResVReg, ResType,
3650 I);
3651 case Intrinsic::spv_flattened_thread_id_in_group:
3652
3653
3654
3655
3656
3657
3658 return loadBuiltinInputID(SPIRV::BuiltIn::LocalInvocationIndex, ResVReg,
3659 ResType, I);
3660 case Intrinsic::spv_workgroup_size:
3661 return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupSize, ResVReg,
3662 ResType, I);
3663 case Intrinsic::spv_global_size:
3664 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalSize, ResVReg, ResType,
3665 I);
3666 case Intrinsic::spv_global_offset:
3667 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalOffset, ResVReg,
3668 ResType, I);
3669 case Intrinsic::spv_num_workgroups:
3670 return loadVec3BuiltinInputID(SPIRV::BuiltIn::NumWorkgroups, ResVReg,
3671 ResType, I);
3672 case Intrinsic::spv_subgroup_size:
3673 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupSize, ResVReg, ResType,
3674 I);
3675 case Intrinsic::spv_num_subgroups:
3676 return loadBuiltinInputID(SPIRV::BuiltIn::NumSubgroups, ResVReg, ResType,
3677 I);
3678 case Intrinsic::spv_subgroup_id:
3679 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupId, ResVReg, ResType, I);
3680 case Intrinsic::spv_subgroup_local_invocation_id:
3681 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupLocalInvocationId,
3682 ResVReg, ResType, I);
3683 case Intrinsic::spv_subgroup_max_size:
3684 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupMaxSize, ResVReg, ResType,
3685 I);
3686 case Intrinsic::spv_fdot:
3687 return selectFloatDot(ResVReg, ResType, I);
3688 case Intrinsic::spv_udot:
3689 case Intrinsic::spv_sdot:
3690 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3692 return selectIntegerDot(ResVReg, ResType, I,
3693 IID == Intrinsic::spv_sdot);
3694 return selectIntegerDotExpansion(ResVReg, ResType, I);
3695 case Intrinsic::spv_dot4add_i8packed:
3696 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3698 return selectDot4AddPacked(ResVReg, ResType, I);
3699 return selectDot4AddPackedExpansion(ResVReg, ResType, I);
3700 case Intrinsic::spv_dot4add_u8packed:
3701 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3703 return selectDot4AddPacked(ResVReg, ResType, I);
3704 return selectDot4AddPackedExpansion(ResVReg, ResType, I);
3705 case Intrinsic::spv_all:
3706 return selectAll(ResVReg, ResType, I);
3707 case Intrinsic::spv_any:
3708 return selectAny(ResVReg, ResType, I);
3709 case Intrinsic::spv_cross:
3710 return selectExtInst(ResVReg, ResType, I, CL::cross, GL::Cross);
3711 case Intrinsic::spv_distance:
3712 return selectExtInst(ResVReg, ResType, I, CL::distance, GL::Distance);
3713 case Intrinsic::spv_lerp:
3714 return selectExtInst(ResVReg, ResType, I, CL::mix, GL::FMix);
3715 case Intrinsic::spv_length:
3716 return selectExtInst(ResVReg, ResType, I, CL::length, GL::Length);
3717 case Intrinsic::spv_degrees:
3718 return selectExtInst(ResVReg, ResType, I, CL::degrees, GL::Degrees);
3719 case Intrinsic::spv_faceforward:
3720 return selectExtInst(ResVReg, ResType, I, GL::FaceForward);
3721 case Intrinsic::spv_frac:
3722 return selectExtInst(ResVReg, ResType, I, CL::fract, GL::Fract);
3723 case Intrinsic::spv_isinf:
3724 return selectOpIsInf(ResVReg, ResType, I);
3725 case Intrinsic::spv_isnan:
3726 return selectOpIsNan(ResVReg, ResType, I);
3727 case Intrinsic::spv_normalize:
3728 return selectExtInst(ResVReg, ResType, I, CL::normalize, GL::Normalize);
3729 case Intrinsic::spv_refract:
3730 return selectExtInst(ResVReg, ResType, I, GL::Refract);
3731 case Intrinsic::spv_reflect:
3732 return selectExtInst(ResVReg, ResType, I, GL::Reflect);
3733 case Intrinsic::spv_rsqrt:
3734 return selectExtInst(ResVReg, ResType, I, CL::rsqrt, GL::InverseSqrt);
3735 case Intrinsic::spv_sign:
3736 return selectSign(ResVReg, ResType, I);
3737 case Intrinsic::spv_smoothstep:
3738 return selectExtInst(ResVReg, ResType, I, CL::smoothstep, GL::SmoothStep);
3739 case Intrinsic::spv_firstbituhigh:
3740 return selectFirstBitHigh(ResVReg, ResType, I, false);
3741 case Intrinsic::spv_firstbitshigh:
3742 return selectFirstBitHigh(ResVReg, ResType, I, true);
3743 case Intrinsic::spv_firstbitlow:
3744 return selectFirstBitLow(ResVReg, ResType, I);
3745 case Intrinsic::spv_group_memory_barrier_with_group_sync: {
3747 auto MemSemConstant =
3748 buildI32Constant(SPIRV::MemorySemantics::SequentiallyConsistent, I);
3749 Register MemSemReg = MemSemConstant.first;
3750 Result &= MemSemConstant.second;
3751 auto ScopeConstant = buildI32Constant(SPIRV::Scope::Workgroup, I);
3752 Register ScopeReg = ScopeConstant.first;
3753 Result &= ScopeConstant.second;
3754 MachineBasicBlock &BB = *I.getParent();
3756 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpControlBarrier))
3761 }
3762 case Intrinsic::spv_generic_cast_to_ptr_explicit: {
3763 Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 1).getReg();
3764 SPIRV::StorageClass::StorageClass ResSC =
3767 report_fatal_error("The target storage class is not castable from the "
3768 "Generic storage class");
3769 return BuildMI(BB, I, I.getDebugLoc(),
3770 TII.get(SPIRV::OpGenericCastToPtrExplicit))
3776 }
3777 case Intrinsic::spv_lifetime_start:
3778 case Intrinsic::spv_lifetime_end: {
3779 unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
3780 : SPIRV::OpLifetimeStop;
3781 int64_t Size = I.getOperand(I.getNumExplicitDefs() + 1).getImm();
3782 Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
3783 if (Size == -1)
3789 }
3790 case Intrinsic::spv_saturate:
3791 return selectSaturate(ResVReg, ResType, I);
3792 case Intrinsic::spv_nclamp:
3793 return selectExtInst(ResVReg, ResType, I, CL::fclamp, GL::NClamp);
3794 case Intrinsic::spv_uclamp:
3795 return selectExtInst(ResVReg, ResType, I, CL::u_clamp, GL::UClamp);
3796 case Intrinsic::spv_sclamp:
3797 return selectExtInst(ResVReg, ResType, I, CL::s_clamp, GL::SClamp);
3798 case Intrinsic::spv_wave_active_countbits:
3799 return selectWaveActiveCountBits(ResVReg, ResType, I);
3800 case Intrinsic::spv_wave_all:
3801 return selectWaveOpInst(ResVReg, ResType, I, SPIRV::OpGroupNonUniformAll);
3802 case Intrinsic::spv_wave_any:
3803 return selectWaveOpInst(ResVReg, ResType, I, SPIRV::OpGroupNonUniformAny);
3804 case Intrinsic::spv_wave_is_first_lane:
3805 return selectWaveOpInst(ResVReg, ResType, I, SPIRV::OpGroupNonUniformElect);
3806 case Intrinsic::spv_wave_reduce_umax:
3807 return selectWaveReduceMax(ResVReg, ResType, I, true);
3808 case Intrinsic::spv_wave_reduce_max:
3809 return selectWaveReduceMax(ResVReg, ResType, I, false);
3810 case Intrinsic::spv_wave_reduce_umin:
3811 return selectWaveReduceMin(ResVReg, ResType, I, true);
3812 case Intrinsic::spv_wave_reduce_min:
3813 return selectWaveReduceMin(ResVReg, ResType, I, false);
3814 case Intrinsic::spv_wave_reduce_sum:
3815 return selectWaveReduceSum(ResVReg, ResType, I);
3816 case Intrinsic::spv_wave_readlane:
3817 return selectWaveOpInst(ResVReg, ResType, I,
3818 SPIRV::OpGroupNonUniformShuffle);
3819 case Intrinsic::spv_step:
3820 return selectExtInst(ResVReg, ResType, I, CL::step, GL::Step);
3821 case Intrinsic::spv_radians:
3822 return selectExtInst(ResVReg, ResType, I, CL::radians, GL::Radians);
3823
3824
3825
3826 case Intrinsic::instrprof_increment:
3827 case Intrinsic::instrprof_increment_step:
3828 case Intrinsic::instrprof_value_profile:
3829 break;
3830
3831 case Intrinsic::spv_value_md:
3832 break;
3833 case Intrinsic::spv_resource_handlefrombinding: {
3834 return selectHandleFromBinding(ResVReg, ResType, I);
3835 }
3836 case Intrinsic::spv_resource_counterhandlefrombinding:
3837 return selectCounterHandleFromBinding(ResVReg, ResType, I);
3838 case Intrinsic::spv_resource_updatecounter:
3839 return selectUpdateCounter(ResVReg, ResType, I);
3840 case Intrinsic::spv_resource_store_typedbuffer: {
3841 return selectImageWriteIntrinsic(I);
3842 }
3843 case Intrinsic::spv_resource_load_typedbuffer: {
3844 return selectReadImageIntrinsic(ResVReg, ResType, I);
3845 }
3846 case Intrinsic::spv_resource_getpointer: {
3847 return selectResourceGetPointer(ResVReg, ResType, I);
3848 }
3849 case Intrinsic::spv_discard: {
3850 return selectDiscard(ResVReg, ResType, I);
3851 }
3852 case Intrinsic::spv_resource_nonuniformindex: {
3853 return selectResourceNonUniformIndex(ResVReg, ResType, I);
3854 }
3855 case Intrinsic::spv_unpackhalf2x16: {
3856 return selectExtInst(ResVReg, ResType, I, GL::UnpackHalf2x16);
3857 }
3858 case Intrinsic::spv_ddx_coarse:
3859 return selectDerivativeInst(ResVReg, ResType, I, SPIRV::OpDPdxCoarse);
3860 case Intrinsic::spv_ddy_coarse:
3861 return selectDerivativeInst(ResVReg, ResType, I, SPIRV::OpDPdyCoarse);
3862 case Intrinsic::spv_fwidth:
3863 return selectDerivativeInst(ResVReg, ResType, I, SPIRV::OpFwidth);
3864 default: {
3865 std::string DiagMsg;
3866 raw_string_ostream OS(DiagMsg);
3867 I.print(OS);
3868 DiagMsg = "Intrinsic selection not implemented: " + DiagMsg;
3870 }
3871 }
3872 return true;
3873}
3874
// Selects spv_resource_handlefrombinding. Image-typed results are handled
// lazily (loaded at each use site), so nothing is emitted here; other
// handles are loaded immediately via loadHandleBeforePosition.
// NOTE(review): lines are missing from this extraction; confirm against
// upstream llvm-project before editing.
3875bool SPIRVInstructionSelector::selectHandleFromBinding(Register &ResVReg,
3877 MachineInstr &I) const {
3878
3879
3880 if (ResType->getOpcode() == SPIRV::OpTypeImage)
3881 return true;
3882
3883 return loadHandleBeforePosition(ResVReg, GR.getSPIRVTypeForVReg(ResVReg),
3885}
3886
// Selects spv_resource_counterhandlefrombinding: recovers set/binding/array
// size/index from the main handle's defining handlefrombinding intrinsic,
// builds the "<name>.counter" resource pointer, and copies it to ResVReg.
// NOTE(review): lines are missing from this extraction; confirm against
// upstream llvm-project before editing.
3887bool SPIRVInstructionSelector::selectCounterHandleFromBinding(
3888 Register &ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
3890 assert(Intr.getIntrinsicID() ==
3891 Intrinsic::spv_resource_counterhandlefrombinding);
3892
3893
3894 Register MainHandleReg = Intr.getOperand(2).getReg();
3896 assert(MainHandleDef->getIntrinsicID() ==
3897 Intrinsic::spv_resource_handlefrombinding);
3898
3901 uint32_t ArraySize = getIConstVal(MainHandleDef->getOperand(4).getReg(), MRI);
3902 Register IndexReg = MainHandleDef->getOperand(5).getReg();
3903 std::string CounterName =
3905 ".counter";
3906
3907
3908 MachineIRBuilder MIRBuilder(I);
3909 Register CounterVarReg = buildPointerToResource(
3911 Binding, ArraySize, IndexReg, CounterName, MIRBuilder);
3912
3913 return BuildCOPY(ResVReg, CounterVarReg, I);
3914}
3915
// Selects spv_resource_updatecounter: access-chains to member 0 (an i32) of
// the StorageBuffer counter struct, performs an OpAtomicIAdd with Device
// scope / no memory-semantics, and returns the atomic result. For a negative
// increment the pre-decrement value is adjusted with OpIAddS before copy-out.
// NOTE(review): many intermediate lines are missing and several conditions
// are corrupted (e.g. `if (.second)` has lost its `!Zero`); confirm against
// upstream llvm-project before editing.
3916bool SPIRVInstructionSelector::selectUpdateCounter(Register &ResVReg,
3918 MachineInstr &I) const {
3920 assert(Intr.getIntrinsicID() == Intrinsic::spv_resource_updatecounter);
3921
3922 Register CounterHandleReg = Intr.getOperand(2).getReg();
3923 Register IncrReg = Intr.getOperand(3).getReg();
3924
3925
3926
3927
3928#ifndef NDEBUG
3931 assert(CounterVarPointeeType &&
3932 CounterVarPointeeType->getOpcode() == SPIRV::OpTypeStruct &&
3933 "Counter variable must be a struct");
3935 SPIRV::StorageClass::StorageBuffer &&
3936 "Counter variable must be in the storage buffer storage class");
3938 "Counter variable must have exactly 1 member in the struct");
3942 "Counter variable struct must have a single i32 member");
3943#endif
3944
3945
3946 MachineIRBuilder MIRBuilder(I);
3948 Type::getInt32Ty(I.getMF()->getFunction().getContext());
3949
3951 LLVMIntType, MIRBuilder, SPIRV::StorageClass::StorageBuffer);
3952
3953 auto Zero = buildI32Constant(0, I);
3954 if (.second)
3955 return false;
3956
3958 MRI->createVirtualRegister(GR.getRegClass(IntPtrType));
3959 if ((*I.getParent(), I, I.getDebugLoc(),
3960 TII.get(SPIRV::OpAccessChain))
3961 .addDef(PtrToCounter)
3963 .addUse(CounterHandleReg)
3966 return false;
3967 }
3968
3969
3970
3971 auto Scope = buildI32Constant(SPIRV::Scope::Device, I);
3972 if (.second)
3973 return false;
3974 auto Semantics = buildI32Constant(SPIRV::MemorySemantics::None, I);
3975 if (!Semantics.second)
3976 return false;
3977
3979 auto Incr = buildI32Constant(static_cast<uint32_t>(IncrVal), I);
3980 if (!Incr.second)
3981 return false;
3982
3984 if ((*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpAtomicIAdd))
3987 .addUse(PtrToCounter)
3989 .addUse(Semantics.first)
3992 return false;
3993 }
3994 if (IncrVal >= 0) {
3995 return BuildCOPY(ResVReg, AtomicRes, I);
3996 }
3997
3998
3999
4000
4001
4002
4003 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
4009}
// Selects spv_resource_load_typedbuffer: re-loads the image handle just
// before this instruction (handles are loaded lazily at use sites), then
// delegates to generateImageReadOrFetch with the index operand.
// NOTE(review): lines are missing from this extraction; confirm against
// upstream llvm-project before editing.
4010bool SPIRVInstructionSelector::selectReadImageIntrinsic(
4011 Register &ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
4012
4013
4014
4015
4016
4017
4018
4019 Register ImageReg = I.getOperand(2).getReg();
4021 Register NewImageReg = MRI->createVirtualRegister(MRI->getRegClass(ImageReg));
4022 if (!loadHandleBeforePosition(NewImageReg, GR.getSPIRVTypeForVReg(ImageReg),
4023 *ImageDef, I)) {
4024 return false;
4025 }
4026
4027 Register IdxReg = I.getOperand(3).getReg();
4029 MachineInstr &Pos = I;
4030
4031 return generateImageReadOrFetch(ResVReg, ResType, NewImageReg, IdxReg, Loc,
4032 Pos);
4033}
4034
// Emits OpImageFetch (for sampled images, Sampled operand == 1) or OpImageRead
// (otherwise). When the result is not already a 4-component vector the read is
// widened to vec4 and the requested components extracted afterwards.
// NOTE(review): lossy extraction — lines are missing between the fused
// original line numbers; code preserved verbatim.
4035bool SPIRVInstructionSelector::generateImageReadOrFetch(
4039 assert(ImageType && ImageType->getOpcode() == SPIRV::OpTypeImage &&
4040 "ImageReg is not an image type.");
4041
4042 bool IsSignedInteger =
4044
4045
// Image type operand 6 is the "Sampled" field of OpTypeImage.
4046 auto SampledOp = ImageType->getOperand(6);
4047 bool IsFetch = (SampledOp.getImm() == 1);
4048
// Fast path: result already has 4 components, read directly into ResVReg.
4050 if (ResultSize == 4) {
4051 auto BMI =
4053 TII.get(IsFetch ? SPIRV::OpImageFetch : SPIRV::OpImageRead))
4058
// 0x1000 — presumably the SignExtend image-operand mask; TODO confirm
// against the SPIR-V Image Operands encoding.
4059 if (IsSignedInteger)
4060 BMI.addImm(0x1000);
4062 }
4063
// Otherwise read as vec4 and narrow: scalar result uses OpCompositeExtract,
// smaller vectors go through extractSubvector.
4064 SPIRVType *ReadType = widenTypeToVec4(ResType, Pos);
4066 auto BMI =
4068 TII.get(IsFetch ? SPIRV::OpImageFetch : SPIRV::OpImageRead))
4073 if (IsSignedInteger)
4074 BMI.addImm(0x1000);
4076 if (!Succeed)
4077 return false;
4078
4079 if (ResultSize == 1) {
4081 TII.get(SPIRV::OpCompositeExtract))
4087 }
4088 return extractSubvector(ResVReg, ResType, ReadReg, Pos);
4089}
4090
// Selects spv.resource.getpointer-style access: images are handled without
// emitting code here (returns true directly); other resources get an
// OpAccessChain from the resource pointer using the index operand.
// NOTE(review): lossy extraction — some lines missing; code verbatim.
4091bool SPIRVInstructionSelector::selectResourceGetPointer(
4092 Register &ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
4093 Register ResourcePtr = I.getOperand(2).getReg();
4095 if (RegType->getOpcode() == SPIRV::OpTypeImage) {
4096
4097
4098
4099
4100 return true;
4101 }
4102
4104 MachineIRBuilder MIRBuilder(I);
4105
4106 Register IndexReg = I.getOperand(3).getReg();
4109 return BuildMI(*I.getParent(), I, I.getDebugLoc(),
4110 TII.get(SPIRV::OpAccessChain))
4113 .addUse(ResourcePtr)
4117}
4118
// Copies the object register to the result and then marks the result (and,
// via the helper, its transitive uses) with the NonUniformEXT decoration.
// NOTE(review): lossy extraction — some lines missing; code verbatim.
4119bool SPIRVInstructionSelector::selectResourceNonUniformIndex(
4120 Register &ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
4121 Register ObjReg = I.getOperand(2).getReg();
4122 if (!BuildCOPY(ResVReg, ObjReg, I))
4123 return false;
4124
4126
4127
4128
4129
4130
4131
4132 decorateUsesAsNonUniform(ResVReg);
4133 return true;
4134}
4135
// Worklist walk over users of NonUniformReg: registers defined by those users
// are (presumably) queued for the same treatment, and any register not yet
// carrying an OpDecorate NonUniformEXT gets one added.
// NOTE(review): lossy extraction — the worklist push and pop lines are among
// the missing ones; code preserved verbatim.
4136void SPIRVInstructionSelector::decorateUsesAsNonUniform(
4137 Register &NonUniformReg) const {
4139 while (WorkList.size() > 0) {
4142
4143 bool IsDecorated = false;
4144 for (MachineInstr &Use : MRI->use_instructions(CurrentReg)) {
// An existing NonUniformEXT decoration means CurrentReg is already done.
4145 if (Use.getOpcode() == SPIRV::OpDecorate &&
4146 Use.getOperand(1).getImm() == SPIRV::Decoration::NonUniformEXT) {
4147 IsDecorated = true;
4148 continue;
4149 }
4150
4151
// Propagate to instructions that define a new register from CurrentReg.
4152 if (Use.getOperand(0).isReg() && Use.getOperand(0).isDef()) {
4153 Register ResultReg = Use.getOperand(0).getReg();
4154 if (ResultReg == CurrentReg)
4155 continue;
4157 }
4158 }
4159
4160 if (!IsDecorated) {
4162 SPIRV::Decoration::NonUniformEXT, {});
4163 }
4164 }
4165}
4166
// Builds a smaller vector from the front components of a larger one:
// one OpCompositeExtract per result component, then one OpCompositeConstruct.
// Asserts that both input and result are vectors and the result is strictly
// smaller. NOTE(review): lossy extraction — some lines missing; code verbatim.
4167bool SPIRVInstructionSelector::extractSubvector(
4169 MachineInstr &InsertionPoint) const {
4171 [[maybe_unused]] uint64_t InputSize =
4174 assert(InputSize > 1 && "The input must be a vector.");
4175 assert(ResultSize > 1 && "The result must be a vector.");
4176 assert(ResultSize < InputSize &&
4177 "Cannot extract more element than there are in the input.");
4180 const TargetRegisterClass *ScalarRegClass = GR.getRegClass(ScalarType);
4181 for (uint64_t I = 0; I < ResultSize; I++) {
4182 Register ComponentReg = MRI->createVirtualRegister(ScalarRegClass);
4183 bool Succeed = BuildMI(*InsertionPoint.getParent(), InsertionPoint,
4185 TII.get(SPIRV::OpCompositeExtract))
4186 .addDef(ComponentReg)
4191 if (!Succeed)
4192 return false;
4193 ComponentRegisters.emplace_back(ComponentReg)\u003b
4194 }
4195
// Re-assemble the extracted scalars into the result vector.
4196 MachineInstrBuilder MIB = BuildMI(*InsertionPoint.getParent(), InsertionPoint,
4198 TII.get(SPIRV::OpCompositeConstruct))
4201
4202 for (Register ComponentReg : ComponentRegisters)
4203 MIB.addUse(ComponentReg);
4205}
4206
// Selects an image-write intrinsic: re-loads the image handle before this
// position, then emits OpImageWrite(image, coordinate, data).
// NOTE(review): lossy extraction — some lines missing; code verbatim.
4207bool SPIRVInstructionSelector::selectImageWriteIntrinsic(
4208 MachineInstr &I) const {
4209
4210
4211
4212
4213
4214
// Operand 1 is the image handle (no result def on this intrinsic).
4215 Register ImageReg = I.getOperand(1).getReg();
4217 Register NewImageReg = MRI->createVirtualRegister(MRI->getRegClass(ImageReg));
4218 if (!loadHandleBeforePosition(NewImageReg, GR.getSPIRVTypeForVReg(ImageReg),
4219 *ImageDef, I)) {
4220 return false;
4221 }
4222
4223 Register CoordinateReg = I.getOperand(2).getReg();
4224 Register DataReg = I.getOperand(3).getReg();
4227 return BuildMI(*I.getParent(), I, I.getDebugLoc(),
4228 TII.get(SPIRV::OpImageWrite))
4229 .addUse(NewImageReg)
4230 .addUse(CoordinateReg)
4233}
4234
// Returns a pointer to a (set, binding) resource variable. ArraySize == 1
// binds the variable directly; otherwise the variable is an array of the
// resource type and an OpAccessChain with IndexReg selects the element.
// NOTE(review): lossy extraction — some lines missing; code verbatim.
4235Register SPIRVInstructionSelector::buildPointerToResource(
4236 const SPIRVType *SpirvResType, SPIRV::StorageClass::StorageClass SC,
4237 uint32_t Set, uint32_t Binding, uint32_t ArraySize, Register IndexReg,
4238 StringRef Name, MachineIRBuilder MIRBuilder) const {
4240 if (ArraySize == 1) {
4244 "SpirvResType did not have an explicit layout.");
4246 MIRBuilder);
4247 }
4248
4249 const Type *VarType = ArrayType::get(const_cast<Type *>(ResType), ArraySize);
4253 VarPointerType, Set, Binding, Name, MIRBuilder);
4254
4258
4259 MIRBuilder.buildInstr(SPIRV::OpAccessChain)
4264
4265 return AcReg;
4266}
4267
// 16-bit firstbit variant: widens the operand to 32 bits with the given
// extend opcode (OpSConvert/OpUConvert), then reuses the 32-bit path.
// NOTE(review): lossy extraction — some lines missing; code verbatim.
4268bool SPIRVInstructionSelector::selectFirstBitSet16(
4270 unsigned ExtendOpcode, unsigned BitSetOpcode) const {
4272 bool Result = selectOpWithSrcs(ExtReg, ResType, I, {I.getOperand(2).getReg()},
4273 ExtendOpcode);
4274
4276 selectFirstBitSet32(ResVReg, ResType, I, ExtReg, BitSetOpcode);
4277}
4278
// 32-bit firstbit variant: single OpExtInst into GLSL.std.450 with the chosen
// FindSMsb/FindUMsb/FindILsb opcode applied to SrcReg.
// NOTE(review): lossy extraction — some lines missing; code verbatim.
4279bool SPIRVInstructionSelector::selectFirstBitSet32(
4281 Register SrcReg, unsigned BitSetOpcode) const {
4282 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
4285 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
4286 .addImm(BitSetOpcode)
4289}
4290
// Handles 64-bit firstbit on vectors with 3+ components, which cannot be
// bitcast to a single wider vector (an 8-component i32 vector would be
// invalid SPIR-V). Splits the source into 2-element chunks processed by
// selectFirstBitSet64, handles a possible odd trailing element separately,
// then recombines the partial results with OpCompositeConstruct.
// NOTE(review): lossy extraction — several declarations (chunk types, shuffle
// operands, worklist of partial registers) are missing; code verbatim.
4291bool SPIRVInstructionSelector::selectFirstBitSet64Overflow(
4293 Register SrcReg, unsigned BitSetOpcode, bool SwapPrimarySide) const {
4294
4295
4296
4297
4298
4300 assert(ComponentCount < 5 && "Vec 5+ will generate invalid SPIR-V ops");
4301
4302 MachineIRBuilder MIRBuilder(I);
4309
4310 std::vector PartialRegs;
4311
// Process the source two 64-bit components at a time.
4312
4313 unsigned CurrentComponent = 0;
4314 for (; CurrentComponent + 1 < ComponentCount; CurrentComponent += 2) {
4315
4316
4318 MRI->createVirtualRegister(GR.getRegClass(I64x2Type));
4319
// Shuffle out the current pair of components into a 2-element vector.
4320 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
4321 TII.get(SPIRV::OpVectorShuffle))
4322 .addDef(BitSetResult)
4326 .addImm(CurrentComponent)
4327 .addImm(CurrentComponent + 1);
4328
4330 return false;
4331
4333 MRI->createVirtualRegister(GR.getRegClass(Vec2ResType));
4334
4335 if (!selectFirstBitSet64(SubVecBitSetReg, Vec2ResType, I, BitSetResult,
4336 BitSetOpcode, SwapPrimarySide))
4337 return false;
4338
4339 PartialRegs.push_back(SubVecBitSetReg);
4340 }
4341
// Odd component count: extract and process the final scalar element.
4342
4343 if (CurrentComponent != ComponentCount) {
4344 bool ZeroAsNull = !STI.isShader();
4347 ComponentCount - 1, I, BaseType, TII, ZeroAsNull);
4348
4349 if (!selectOpWithSrcs(FinalElemReg, I64Type, I, {SrcReg, ConstIntLastIdx},
4350 SPIRV::OpVectorExtractDynamic))
4351 return false;
4352
4353 Register FinalElemBitSetReg =
4355
4356 if (!selectFirstBitSet64(FinalElemBitSetReg, BaseType, I, FinalElemReg,
4357 BitSetOpcode, SwapPrimarySide))
4358 return false;
4359
4360 PartialRegs.push_back(FinalElemBitSetReg);
4361 }
4362
// Stitch the partial results back into the full result vector.
4363
4364
4365 return selectOpWithSrcs(ResVReg, ResType, I, std::move(PartialRegs),
4366 SPIRV::OpCompositeConstruct);
4367}
4368
// 64-bit firstbit: bitcasts each 64-bit element into a pair of 32-bit halves,
// runs the 32-bit firstbit on all halves at once, then combines the high/low
// results. SwapPrimarySide selects which half is consulted first (high for
// MSB-style searches, low for LSB-style). Vectors wider than 2 components are
// routed through selectFirstBitSet64Overflow.
// NOTE(review): lossy extraction — declarations of HighReg/LowReg/FBSReg,
// shuffle component lists, Reg0/Reg32 and NegOneReg constants are among the
// missing lines; code preserved verbatim.
4369bool SPIRVInstructionSelector::selectFirstBitSet64(
4371 Register SrcReg, unsigned BitSetOpcode, bool SwapPrimarySide) const {
4374 bool ZeroAsNull = !STI.isShader();
4379
4380
4381
4382
4383
4384 if (ComponentCount > 2) {
4385 return selectFirstBitSet64Overflow(ResVReg, ResType, I, SrcReg,
4386 BitSetOpcode, SwapPrimarySide);
4387 }
4388
// Reinterpret the 64-bit source as a vector of twice as many 32-bit lanes.
4389
4390 MachineIRBuilder MIRBuilder(I);
4392 BaseType, 2 * ComponentCount, MIRBuilder, false);
4394 MRI->createVirtualRegister(GR.getRegClass(PostCastType));
4395
4396 if (!selectOpWithSrcs(BitcastReg, PostCastType, I, {SrcReg},
4397 SPIRV::OpBitcast))
4398 return false;
4399
// One 32-bit firstbit over all halves simultaneously.
4400
4402 if (!selectFirstBitSet32(FBSReg, PostCastType, I, BitcastReg, BitSetOpcode))
4403 return false;
4404
// Split the combined result into per-element high and low halves: dynamic
// extracts for the scalar case, even/odd shuffles for the vector case.
4405
4408
4409 bool IsScalarRes = ResType->getOpcode() != SPIRV::OpTypeVector;
4410 if (IsScalarRes) {
4411
4412 if (!selectOpWithSrcs(HighReg, ResType, I, {FBSReg, ConstIntZero},
4413 SPIRV::OpVectorExtractDynamic))
4414 return false;
4415 if (!selectOpWithSrcs(LowReg, ResType, I, {FBSReg, ConstIntOne},
4416 SPIRV::OpVectorExtractDynamic))
4417 return false;
4418 } else {
4419
4420 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
4421 TII.get(SPIRV::OpVectorShuffle))
4425
4427
// Even lanes — presumably the high halves; TODO confirm lane order.
4428
4429 for (unsigned J = 0; J < ComponentCount * 2; J += 2) {
4431 }
4432
4434 return false;
4435
4436 MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
4437 TII.get(SPIRV::OpVectorShuffle))
4441
4443
// Odd lanes — the complementary halves.
4444
4445 for (unsigned J = 1; J < ComponentCount * 2; J += 2) {
4447 }
4449 return false;
4450 }
4451
// Pick scalar vs. vector forms of select/add, and build the -1 constant
// used to detect "not found" in the primary half.
4452
4453
4458 unsigned SelectOp;
4459 unsigned AddOp;
4460
4461 if (IsScalarRes) {
4462 NegOneReg =
4466 SelectOp = SPIRV::OpSelectSISCond;
4467 AddOp = SPIRV::OpIAddS;
4468 } else {
4470 MIRBuilder, false);
4471 NegOneReg =
4475 SelectOp = SPIRV::OpSelectVIVCond;
4476 AddOp = SPIRV::OpIAddV;
4477 }
4478
4479 Register PrimaryReg = HighReg;
4480 Register SecondaryReg = LowReg;
4481 Register PrimaryShiftReg = Reg32;
4482 Register SecondaryShiftReg = Reg0;
4483
// For LSB-style searches the low half is consulted first.
4484
4485
4486 if (SwapPrimarySide) {
4487 PrimaryReg = LowReg;
4488 SecondaryReg = HighReg;
4489 PrimaryShiftReg = Reg0;
4490 SecondaryShiftReg = Reg32;
4491 }
4492
// BReg: true where the primary half found no bit (== -1).
4493
4495 if (!selectOpWithSrcs(BReg, BoolType, I, {PrimaryReg, NegOneReg},
4496 SPIRV::OpIEqual))
4497 return false;
4498
// TmpReg: per-element bit index from whichever half produced a result.
4499
4501 if (!selectOpWithSrcs(TmpReg, ResType, I, {BReg, SecondaryReg, PrimaryReg},
4502 SelectOp))
4503 return false;
4504
// ValReg: matching offset (0 or 32) for the chosen half; final result is
// offset + index.
4505
4507 if (!selectOpWithSrcs(ValReg, ResType, I,
4508 {BReg, SecondaryShiftReg, PrimaryShiftReg}, SelectOp))
4509 return false;
4510
4511 return selectOpWithSrcs(ResVReg, ResType, I, {ValReg, TmpReg}, AddOp);
4512}
4513
// Dispatches spv_firstbit(u|s)high by operand bit width: 16-bit extends to 32
// first, 32-bit maps directly to GLSL FindSMsb/FindUMsb, 64-bit uses the
// split-halves path (high half primary, i.e. SwapPrimarySide = false).
// NOTE(review): lossy extraction — the switch header and default-case
// report_fatal_error call line are missing; code verbatim.
4514bool SPIRVInstructionSelector::selectFirstBitHigh(Register ResVReg,
4516 MachineInstr &I,
4517 bool IsSigned) const {
4518
4519 Register OpReg = I.getOperand(2).getReg();
4521
4522 unsigned ExtendOpcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
4523 unsigned BitSetOpcode = IsSigned ? GL::FindSMsb : GL::FindUMsb;
4524
4526 case 16:
4527 return selectFirstBitSet16(ResVReg, ResType, I, ExtendOpcode, BitSetOpcode);
4528 case 32:
4529 return selectFirstBitSet32(ResVReg, ResType, I, OpReg, BitSetOpcode);
4530 case 64:
4531 return selectFirstBitSet64(ResVReg, ResType, I, OpReg, BitSetOpcode,
4532 false);
4533 default:
4535 "spv_firstbituhigh and spv_firstbitshigh only support 16,32,64 bits.");
4536 }
4537}
4538
// Dispatches spv_firstbitlow by operand bit width using GLSL FindILsb.
// 16-bit input is zero-extended (OpUConvert) since FindILsb is sign-agnostic;
// 64-bit uses the split-halves path with the low half primary
// (SwapPrimarySide = true).
// NOTE(review): lossy extraction — switch header and default case body are
// missing; code verbatim.
4539bool SPIRVInstructionSelector::selectFirstBitLow(Register ResVReg,
4541 MachineInstr &I) const {
4542
4543 Register OpReg = I.getOperand(2).getReg();
4545
4546
4547
4548 unsigned ExtendOpcode = SPIRV::OpUConvert;
4549 unsigned BitSetOpcode = GL::FindILsb;
4550
4552 case 16:
4553 return selectFirstBitSet16(ResVReg, ResType, I, ExtendOpcode, BitSetOpcode);
4554 case 32:
4555 return selectFirstBitSet32(ResVReg, ResType, I, OpReg, BitSetOpcode);
4556 case 64:
4557 return selectFirstBitSet64(ResVReg, ResType, I, OpReg, BitSetOpcode,
4558 true);
4559 default:
4561 }
4562}
4563
// Selects a dynamically-sized alloca via OpVariableLengthArrayINTEL, with the
// length taken from operand 2; an alignment immediate (operand 3), when
// present, is attached as an Alignment decoration.
// NOTE(review): lossy extraction — the condition guarding the alignment block
// is among the missing lines; code verbatim.
4564bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
4566 MachineInstr &I) const {
4567
4568
4569 MachineBasicBlock &BB = *I.getParent();
4570 bool Res = BuildMI(BB, I, I.getDebugLoc(),
4571 TII.get(SPIRV::OpVariableLengthArrayINTEL))
4574 .addUse(I.getOperand(2).getReg())
4577 unsigned Alignment = I.getOperand(3).getImm();
4578 buildOpDecorate(ResVReg, I, TII, SPIRV::Decoration::Alignment, {Alignment});
4579 }
4580 return Res;
4581}
4582
// Selects a frame index as an OpVariable in the Function storage class,
// inserted at position It (presumably the entry block's variable section —
// TODO confirm). Operand 2, when present, supplies an Alignment decoration.
// NOTE(review): lossy extraction — It's computation and the alignment guard
// are among the missing lines; code verbatim.
4583bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
4585 MachineInstr &I) const {
4586
4587
4589 bool Res = BuildMI(*It->getParent(), It, It->getDebugLoc(),
4590 TII.get(SPIRV::OpVariable))
4593 .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
4596 unsigned Alignment = I.getOperand(2).getImm();
4598 {Alignment});
4599 }
4600 return Res;
4601}
4602
// Selects G_BR. If the preceding instruction is a still-unselected G_BRCOND,
// this branch becomes the false edge of an OpBranchConditional; otherwise a
// plain OpBranch is emitted.
// NOTE(review): lossy extraction — the unconditional OpBranch BuildMI line is
// missing (only its .addMBB remains at 4618); code verbatim.
4603bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
4604
4605
4606
4607
4608 const MachineInstr *PrevI = I.getPrevNode();
4610 if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
4611 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
4614 .addMBB(I.getOperand(0).getMBB())
4616 }
4618 .addMBB(I.getOperand(0).getMBB())
4620}
4621
// Selects G_BRCOND. If the next instruction is already an OpBranchConditional
// (created when selecting the following G_BR), nothing more is needed.
// Otherwise emits OpBranchConditional with the fallthrough block (the next
// numbered MBB) as the false successor.
// NOTE(review): lossy extraction — the NextMBBNum computation line is among
// the missing ones; code verbatim.
4622bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
4623
4624
4625
4626
4627
4628
4629
4630
4631
4632
4633 const MachineInstr *NextI = I.getNextNode();
4634
4635 if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
4636 return true;
4637
4638
4641 MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
4642 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
4643 .addUse(I.getOperand(0).getReg())
4644 .addMBB(I.getOperand(1).getMBB())
4647}
4648
// Selects G_PHI as OpPhi, copying each (value, predecessor-block) operand
// pair from the generic instruction.
// NOTE(review): lossy extraction — the constrainAllUses/Res computation at
// the end is missing; code verbatim.
4649bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
4651 MachineInstr &I) const {
4652 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
4655 const unsigned NumOps = I.getNumOperands();
// Operands come in (value reg, incoming MBB) pairs starting at index 1.
4656 for (unsigned i = 1; i < NumOps; i += 2) {
4657 MIB.addUse(I.getOperand(i + 0).getReg());
4658 MIB.addMBB(I.getOperand(i + 1).getMBB());
4659 }
4663 return Res;
4664}
4665
// Selects G_GLOBAL_VALUE. Unnamed globals get a synthesized "__unnamed_N"
// identifier. Function addresses use OpConstantFunctionPointerINTEL when the
// SPV_INTEL_function_pointers extension is available (OpUndef otherwise);
// other already-registered constants fall back to OpConstantNull, and the
// cached register is copied into ResVReg. Plain global variables are built
// via GR.buildGlobalVariable with deduced storage class and linkage.
// NOTE(review): lossy extraction — many lines missing, and template angle
// brackets were eaten by the HTML (e.g. std::optionalSPIRV::...); code
// preserved verbatim.
4666bool SPIRVInstructionSelector::selectGlobalValue(
4667 Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
4668
4669 MachineIRBuilder MIRBuilder(I);
4670 const GlobalValue *GV = I.getOperand(1).getGlobal();
4672
4673 std::string GlobalIdent;
4675 unsigned &ID = UnnamedGlobalIDs[GV];
4676 if (ID == 0)
4677 ID = UnnamedGlobalIDs.size();
4678 GlobalIdent = "__unnamed_" + Twine(ID).str();
4679 } else {
4680 GlobalIdent = GV->getName();
4681 }
4682
4683
4684
4685
4686
4687
4688
4689
4690
4691
4692
4694 const Constant *ConstVal = GV;
4695 MachineBasicBlock &BB = *I.getParent();
// NewReg invalid means this constant has not been materialized yet.
4697 if (!NewReg.isValid()) {
4700 STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
4702 : nullptr;
4704 GVType, I,
4705 GVFun ? SPIRV::StorageClass::CodeSectionINTEL
4707 if (GVFun) {
4708
4709
4710
4712 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
4714 MRI->createGenericVirtualRegister(GR.getRegType(ResType));
4715 MRI->setRegClass(FuncVReg, &SPIRV::pIDRegClass);
4716 MachineInstrBuilder MIB1 =
4717 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
4719 .addUse(ResTypeReg);
4720 MachineInstrBuilder MIB2 =
4722 TII.get(SPIRV::OpConstantFunctionPointerINTEL))
4726 GR.add(ConstVal, MIB2);
4727
4731 }
4732 MachineInstrBuilder MIB3 =
4733 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
4736 GR.add(ConstVal, MIB3);
4738 }
4739 assert(NewReg != ResVReg);
4740 return BuildCOPY(ResVReg, NewReg, I);
4741 }
4743 assert(GlobalVar->getName() != "llvm.global.annotations");
4744
4745
4746
4748 return true;
4749
4750 const std::optionalSPIRV::LinkageType::LinkageType LnkType =
4752
4754 SPIRV::StorageClass::StorageClass StorageClass =
4758 ResVReg, ResType, GlobalIdent, GV, StorageClass, Init,
4759 GlobalVar->isConstant(), LnkType, MIRBuilder, true);
4761}
4762
// Selects log10. On OpenCL targets this is a direct CL::log10 ext-inst.
// Otherwise it is lowered via GLSL.std.450 as log2(x) scaled by a constant
// (log10(x) = log2(x) * (1 / log2(10))): OpVectorTimesScalar for vector
// results, OpFMulS for scalars.
// NOTE(review): lossy extraction — the log2 ext-inst opcode, the scale
// constant, and the final multiply emission are among the missing lines;
// code verbatim.
4763bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
4765 MachineInstr &I) const {
4766 if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
4767 return selectExtInst(ResVReg, ResType, I, CL::log10);
4768 }
4769
4770
4771
4772
4773
4774
4775 MachineIRBuilder MIRBuilder(I);
4776 MachineBasicBlock &BB = *I.getParent();
4777
4778
4781 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
4784 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
4788
4789
4791 ResType->getOpcode() == SPIRV::OpTypeFloat);
4792
4793 const SPIRVType *SpirvScalarType =
4794 ResType->getOpcode() == SPIRV::OpTypeVector
4796 : ResType;
4799
4800
4801 auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
4802 ? SPIRV::OpVectorTimesScalar
4803 : SPIRV::OpFMulS;
4810}
4811
// Selects modf. On OpenCL targets: allocates a Function-storage OpVariable in
// the entry block for the integral part, calls the OpenCL modf ext-inst with
// that pointer, then loads the integral part back into the intrinsic's second
// result register when one is present. The GLSL path is unimplemented because
// GLSL.std.450 Modf (pointer form) is deprecated — the assert fires in debug
// builds and false is returned otherwise.
// NOTE(review): lossy extraction — the pointer-type creation, Variable
// register, and several BuildMI operand lines are missing; code verbatim.
4812bool SPIRVInstructionSelector::selectModf(Register ResVReg,
4814 MachineInstr &I) const {
4815
4816
4817
4818
4819
4820
4821
4822
4823
4824
4825
4826
4827
4828
4829 if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
4830 MachineIRBuilder MIRBuilder(I);
4831
4833 ResType, MIRBuilder, SPIRV::StorageClass::Function);
4834
4838 PtrTyReg,
4841
// OpVariable must be placed in the function's entry block.
4842
4843
4845 MachineBasicBlock &EntryBB = I.getMF()->front();
4848 auto AllocaMIB =
4849 BuildMI(EntryBB, VarPos, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
4852 .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function));
4854
4855 MachineBasicBlock &BB = *I.getParent();
4856
4857 auto MIB =
4858 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
4861 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
4864 .add(I.getOperand(I.getNumExplicitDefs()))
4865 .addUse(Variable);
4866
// Second def (operand 1), when valid, receives the integral part via a
// load from the scratch variable.
4867
4868 Register IntegralPartReg = I.getOperand(1).getReg();
4869 if (IntegralPartReg.isValid()) {
4870
4871 auto LoadMIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
4872 .addDef(IntegralPartReg)
4876 }
4877
4879 } else if (STI.canUseExtInstSet(SPIRV::InstructionSet::GLSL_std_450)) {
4880 assert(false && "GLSL::Modf is deprecated.");
4881
4882 return false;
4883 }
4884 return false;
4885}
4886
4887
4888
4889
4890
// Loads one component of a vec3 built-in input (e.g. a thread/workgroup ID):
// creates/finds the Input-storage built-in global, loads the whole 3-vector,
// then OpCompositeExtracts the component selected by the constant in
// operand 2.
// NOTE(review): lossy extraction — the vec3 type creation and the global
// variable decoration lines are missing; code verbatim.
4891bool SPIRVInstructionSelector::loadVec3BuiltinInputID(
4892 SPIRV::BuiltIn::BuiltIn BuiltInValue, Register ResVReg,
4893 const SPIRVType *ResType, MachineInstr &I) const {
4894 MachineIRBuilder MIRBuilder(I);
4898 Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
4899
4900
4905
4906
4907
4910 SPIRV::StorageClass::Input, nullptr, true, std::nullopt, MIRBuilder,
4911 false);
4912
4913
4914 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
4915 Register LoadedRegister = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
4918
// Load the full built-in vector from the Input variable.
4919
4921 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
4922 .addDef(LoadedRegister)
4925
4926
// Operand 2 holds the constant component index to extract.
4927
4928 assert(I.getOperand(2).isReg());
4929 const uint32_t ThreadId = foldImm(I.getOperand(2), MRI);
4930
4931
4932 MachineBasicBlock &BB = *I.getParent();
4933 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
4936 .addUse(LoadedRegister)
4939}
4940
4941
4942
// Scalar counterpart of loadVec3BuiltinInputID: creates/finds the
// Input-storage global for a scalar built-in and loads it directly into the
// result register via OpLoad.
// NOTE(review): lossy extraction — pointer-type and variable-register setup
// lines are missing; code verbatim.
4943bool SPIRVInstructionSelector::loadBuiltinInputID(
4944 SPIRV::BuiltIn::BuiltIn BuiltInValue, Register ResVReg,
4945 const SPIRVType *ResType, MachineInstr &I) const {
4946 MachineIRBuilder MIRBuilder(I);
4948 ResType, MIRBuilder, SPIRV::StorageClass::Input);
4949
4950
4954 NewRegister,
4958
4959
4960
4963 SPIRV::StorageClass::Input, nullptr, true, std::nullopt, MIRBuilder,
4964 false);
4965
4966
4967 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
4971
4973}
4974
// widenTypeToVec4 (opening signature line lost by the extraction): returns
// the type unchanged for non-vectors handled by the missing early-return and
// for 4-component vectors; otherwise builds a 4-component vector of the same
// scalar element type (final construction line missing). Code verbatim.
4976 MachineInstr &I) const {
4977 MachineIRBuilder MIRBuilder(I);
4978 if (Type->getOpcode() != SPIRV::OpTypeVector)
4980
4982 if (VectorSize == 4)
4983 return Type;
4984
4985 Register ScalarTypeReg = Type->getOperand(1).getReg();
4988}
4989
// Materializes a resource handle right before Pos: decodes the
// spv_resource_handlefrombinding intrinsic's (set, binding, array-size,
// index, name) arguments, builds a pointer to the bound resource, then emits
// OpCopyObject (structured buffers, where the handle IS the pointer) or
// OpLoad (images/samplers) into HandleReg.
// NOTE(review): lossy extraction — the Set/Binding/ArraySize/IndexReg decode
// lines and the final BuildMI are missing; code verbatim.
4990bool SPIRVInstructionSelector::loadHandleBeforePosition(
4991 Register &HandleReg, const SPIRVType *ResType, GIntrinsic &HandleDef,
4992 MachineInstr &Pos) const {
4993
4995 Intrinsic::spv_resource_handlefrombinding);
5000 std::string Name =
5002
// A pointer-typed result marks a structured buffer; its storage class is
// adjusted inside the guarded block (body lines missing).
5003 bool IsStructuredBuffer = ResType->getOpcode() == SPIRV::OpTypePointer;
5004 MachineIRBuilder MIRBuilder(HandleDef);
5006 SPIRV::StorageClass::StorageClass SC = SPIRV::StorageClass::UniformConstant;
5007
5008 if (IsStructuredBuffer) {
5011 }
5012
5013 Register VarReg = buildPointerToResource(VarType, SC, Set, Binding, ArraySize,
5014 IndexReg, Name, MIRBuilder);
5015
5016
5017
5018 uint32_t LoadOpcode =
5019 IsStructuredBuffer ? SPIRV::OpCopyObject : SPIRV::OpLoad;
5027}
5028
// Emits a diagnostic (guard condition line missing — presumably "not a
// shader" on STI) naming the offending instruction as shader-only.
// NOTE(review): lossy extraction — the enclosing if and the error-report call
// are among the missing lines; code verbatim.
5029void SPIRVInstructionSelector::errorIfInstrOutsideShader(
5030 MachineInstr &I) const {
5032 std::string DiagMsg;
5033 raw_string_ostream OS(DiagMsg);
5034 I.print(OS, true, false, false, false);
5035 DiagMsg += " is only supported in shaders.\n";
5037 }
5038}
5039
// Factory entry point: constructs the target's InstructionSelector.
// NOTE(review): the function signature line (name/parameters) was dropped by
// the extraction; code verbatim.
5040namespace llvm {
5041InstructionSelector *
5045 return new SPIRVInstructionSelector(TM, Subtarget, RBI);
5046}
5047}
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
const TargetInstrInfo & TII
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
DXIL Resource Implicit Binding
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
LLVMTypeRef LLVMIntType(unsigned NumBits)
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
Register const TargetRegisterInfo * TRI
Promote Memory to Register
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static StringRef getName(Value *V)
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size)
static APFloat getOneFP(const Type *LLVMFloatTy)
Definition SPIRVInstructionSelector.cpp:2272
static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC)
Definition SPIRVInstructionSelector.cpp:2006
static bool isASCastInGVar(MachineRegisterInfo *MRI, Register ResVReg)
Definition SPIRVInstructionSelector.cpp:2017
static bool mayApplyGenericSelection(unsigned Opcode)
Definition SPIRVInstructionSelector.cpp:807
static APFloat getZeroFP(const Type *LLVMFloatTy)
Definition SPIRVInstructionSelector.cpp:2258
std::vector< std::pair< SPIRV::InstructionSet::InstructionSet, uint32_t > > ExtInstList
Definition SPIRVInstructionSelector.cpp:42
static bool intrinsicHasSideEffects(Intrinsic::ID ID)
Definition SPIRVInstructionSelector.cpp:526
static unsigned getBoolCmpOpcode(unsigned PredNum)
Definition SPIRVInstructionSelector.cpp:2246
static unsigned getICmpOpcode(unsigned PredNum)
Definition SPIRVInstructionSelector.cpp:2206
static bool isOpcodeWithNoSideEffects(unsigned Opcode)
Definition SPIRVInstructionSelector.cpp:591
static void addMemoryOperands(MachineMemOperand *MemOp, MachineInstrBuilder &MIB, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry &GR)
Definition SPIRVInstructionSelector.cpp:1457
static bool isConstReg(MachineRegisterInfo *MRI, MachineInstr *OpDef, SmallPtrSet< SPIRVType *, 4 > &Visited)
Definition SPIRVInstructionSelector.cpp:463
static unsigned getPtrCmpOpcode(unsigned Pred)
Definition SPIRVInstructionSelector.cpp:2234
bool isDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Definition SPIRVInstructionSelector.cpp:625
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static const fltSemantics & IEEEsingle()
static const fltSemantics & IEEEdouble()
static const fltSemantics & IEEEhalf()
static APFloat getOne(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative One.
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
uint64_t getZExtValue() const
Get zero extended value.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Represents a call to an intrinsic.
Intrinsic::ID getIntrinsicID() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
@ InternalLinkage
Rename collisions when linking (static functions).
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
constexpr bool isScalar() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr bool isPointer() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
LLVM_ABI iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert = Opcode .
MachineFunction & getMF()
Getter for the function we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
unsigned getNumOperands() const
Retuns the total number of operands.
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
LLVM_ABI unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
@ MOVolatile
The memory access is volatile.
@ MONonTemporal
The memory access is non-temporal.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
defusechain_instr_iterator< true, false, false, true > use_instr_iterator
use_instr_iterator/use_instr_begin/use_instr_end - Walk all uses of the specified register,...
defusechain_instr_iterator< false, true, false, true > def_instr_iterator
def_instr_iterator/def_instr_begin/def_instr_end - Walk all defs of the specified register,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
Analysis providing profile information.
Holds all the information related to register banks.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
SPIRVType * getSPIRVTypeForVReg(Register VReg, const MachineFunction *MF=nullptr) const
Register getOrCreateConstInt(uint64_t Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
SPIRVType * getResultType(Register VReg, MachineFunction *MF=nullptr)
SPIRVType * getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder, bool EmitIR)
MachineInstr * getOrAddMemAliasingINTELInst(MachineIRBuilder &MIRBuilder, const MDNode *AliasingListMD)
void assignSPIRVTypeToVReg(SPIRVType *Type, Register VReg, const MachineFunction &MF)
Register getOrCreateUndef(MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
Register buildGlobalVariable(Register Reg, SPIRVType *BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, const std::optional< SPIRV::LinkageType::LinkageType > &LinkageType, MachineIRBuilder &MIRBuilder, bool IsInstSelector)
SPIRVType * changePointerStorageClass(SPIRVType *PtrType, SPIRV::StorageClass::StorageClass SC, MachineInstr &I)
const Type * getTypeForSPIRVType(const SPIRVType *Ty) const
bool isBitcastCompatible(const SPIRVType *Type1, const SPIRVType *Type2) const
unsigned getScalarOrVectorComponentCount(Register VReg) const
SPIRVType * getOrCreateSPIRVFloatType(unsigned BitWidth, MachineInstr &I, const SPIRVInstrInfo &TII)
bool isScalarOrVectorSigned(const SPIRVType *Type) const
Register getOrCreateGlobalVariableWithBinding(const SPIRVType *VarType, uint32_t Set, uint32_t Binding, StringRef Name, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVType(const Type *Type, MachineInstr &I, SPIRV::AccessQualifier::AccessQualifier AQ, bool EmitIR)
unsigned getPointerSize() const
SPIRVType * getOrCreateSPIRVPointerType(const Type *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SC)
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr)
SPIRVType * getPointeeType(SPIRVType *PtrType)
void invalidateMachineInstr(MachineInstr *MI)
Register getSPIRVTypeID(const SPIRVType *SpirvType) const
bool isScalarOfType(Register VReg, unsigned TypeOpcode) const
bool findValueAttrs(const MachineInstr *Key, Type *&Ty, StringRef &Name)
void addGlobalObject(const Value *V, const MachineFunction *MF, Register R)
SPIRVType * getScalarOrVectorComponentType(Register VReg) const
void recordFunctionPointer(const MachineOperand *MO, const Function *F)
bool isAggregateType(SPIRVType *Type) const
const TargetRegisterClass * getRegClass(SPIRVType *SpvType) const
SPIRVType * getOrCreateSPIRVVectorType(SPIRVType *BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder, bool EmitIR)
bool isScalarOrVectorOfType(Register VReg, unsigned TypeOpcode) const
Register getOrCreateConstIntArray(uint64_t Val, size_t Num, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
MachineFunction * setCurrentFunc(MachineFunction &MF)
Register getOrCreateConstVector(uint64_t Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
SPIRVType * getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineIRBuilder &MIRBuilder)
Type * getDeducedGlobalValueType(const GlobalValue *Global)
LLT getRegType(SPIRVType *SpvType) const
SPIRV::StorageClass::StorageClass getPointerStorageClass(Register VReg) const
Register getOrCreateConstFP(APFloat Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
Register getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorBitWidth(const SPIRVType *Type) const
const SPIRVType * retrieveScalarOrVectorIntType(const SPIRVType *Type) const
bool erase(const MachineInstr *MI)
bool add(SPIRV::IRHandle Handle, const MachineInstr *MI)
Register find(SPIRV::IRHandle Handle, const MachineFunction *MF)
bool isPhysicalSPIRV() const
bool isAtLeastSPIRVVer(VersionTuple VerToCompareTo) const
bool canUseExtInstSet(SPIRV::InstructionSet::InstructionSet E) const
bool isLogicalSPIRV() const
bool canUseExtension(SPIRV::Extension::Extension E) const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
constexpr size_t size() const
size - Get the string size.
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
The instances of the Type class are immutable: once they are created, they are never changed.
@ HalfTyID
16-bit floating point type
@ FloatTyID
32-bit floating point type
@ DoubleTyID
64-bit floating point type
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
bool isStructTy() const
True if this is an instance of StructType.
TypeID getTypeID() const
Return the type id for the type.
Value * getOperand(unsigned i) const
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char IsConst[]
Key for Kernel::Arg::Metadata::mIsConst.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Scope
Defines the scope in which this symbol should be visible: Default – Visible in the public interface of the linkage unit.
NodeAddr< DefNode * > Def
NodeAddr< InstrNode * > Instr
NodeAddr< UseNode * > Use
This is an optimization pass for GlobalISel generic memory operations.
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
int64_t getIConstValSext(Register ConstReg, const MachineRegisterInfo *MRI)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isTypeFoldingSupported(unsigned Opcode)
decltype(auto) dyn_cast(const From &Val)
dyn_cast - Return the argument parameter cast to the specified type.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing the effect of MI in a DIExpression.
LLVM_ABI bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands to the instruction's register class.
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
unsigned getArrayComponentCount(const MachineRegisterInfo *MRI, const MachineInstr *ResType)
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
SmallVector< MachineInstr *, 4 > createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode, unsigned MinWC, unsigned ContinuedOpcode, ArrayRef< Register > Args, Register ReturnRegister, Register TypeID)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
constexpr unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
MachineBasicBlock::iterator getFirstValidInstructionInsertPoint(MachineBasicBlock &BB)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
MachineBasicBlock::iterator getOpVariableMBBIt(MachineInstr &I)
Register createVirtualRegister(SPIRVType *SpvType, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI, const MachineFunction &MF)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
Type * toTypedPointer(Type *Ty)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
const MachineInstr SPIRVType
constexpr bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector&lt;T, 0&gt;).
MachineInstr * passCopy(MachineInstr *Def, const MachineRegisterInfo *MRI)
bool isa(const From &Val)
isa - Return true if the parameter to the template is an instance of one of the template type arguments.
std::optional< SPIRV::LinkageType::LinkageType > getSpirvLinkageTypeFor(const SPIRVSubtarget &ST, const GlobalValue &GV)
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
SPIRV::StorageClass::StorageClass addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI)
AtomicOrdering
Atomic ordering for LLVM's memory model.
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
InstructionSelector * createSPIRVInstructionSelector(const SPIRVTargetMachine &TM, const SPIRVSubtarget &Subtarget, const RegisterBankInfo &RBI)
Definition SPIRVInstructionSelector.cpp:5042
std::string getStringValueFromReg(Register Reg, MachineRegisterInfo &MRI)
int64_t foldImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
DWARFExpression::Operation Op
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast - Return the argument parameter cast to the specified type.
bool hasInitializer(const GlobalVariable *GV)
MachineInstr * getVRegDef(MachineRegisterInfo &MRI, Register Reg)
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
LLVM_ABI bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have other side effects.