LLVM: lib/Target/SPIRV/SPIRVModuleAnalysis.cpp Source File
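//===-- SPIRVModuleAnalysis.cpp - module-level info collection --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This analysis collects the instructions that must be emitted at module scope
// and performs global numbering of virtual registers.
//
//===----------------------------------------------------------------------===//

// Assumed include set: the original #include directives were not preserved in
// this capture, so the list below is a representative reconstruction.
#include "SPIRVModuleAnalysis.h"
#include "MCTargetDesc/SPIRVBaseInfo.h"
#include "SPIRV.h"
#include "SPIRVSubtarget.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"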
using namespace llvm;

#define DEBUG_TYPE "spirv-module-analysis"

static cl::opt<bool>
    SPVDumpDeps("spv-dump-deps",
                cl::desc("Dump MIR with SPIR-V dependencies info"),
                cl::Optional, cl::init(false));

static cl::list<SPIRV::Capability::Capability>
    AvoidCapabilities("avoid-spirv-capabilities",
                      cl::desc("SPIR-V capabilities to avoid if there are "
                               "other options enabling a feature"),
                      cl::ZeroOrMore, cl::Hidden,
                      cl::values(clEnumValN(SPIRV::Capability::Shader, "Shader",
                                            "SPIR-V Shader capability")));

// Helper set built from the -avoid-spirv-capabilities command-line list.
struct AvoidCapabilitiesSet {
  SmallSet<SPIRV::Capability::Capability, 4> S;
  AvoidCapabilitiesSet() {
    for (auto Cap : AvoidCapabilities)
      S.insert(Cap);
  }
};

char llvm::SPIRVModuleAnalysis::ID = 0;

namespace llvm {
void initializeSPIRVModuleAnalysisPass(PassRegistry &);
} // namespace llvm

INITIALIZE_PASS(SPIRVModuleAnalysis, DEBUG_TYPE, "SPIRV module analysis", true,
                true)

// Retrieve an unsigned metadata operand, or DefaultVal if it is absent.
static unsigned getMetadataUInt(MDNode *MdNode, unsigned OpIndex,
                                unsigned DefaultVal = 0) {
  if (MdNode && OpIndex < MdNode->getNumOperands()) {
    const auto &Op = MdNode->getOperand(OpIndex);
    return mdconst::extract<ConstantInt>(Op)->getZExtValue();
  }
  return DefaultVal;
}

static SPIRV::Requirements
getSymbolicOperandRequirements(SPIRV::OperandCategory::OperandCategory Category,
                               uint32_t i, const SPIRVSubtarget &ST,
                               SPIRV::RequirementHandler &Reqs) {
  static AvoidCapabilitiesSet
      AvoidCaps;

  VersionTuple ReqMinVer = getSymbolicOperandMinVersion(Category, i);
  VersionTuple ReqMaxVer = getSymbolicOperandMaxVersion(Category, i);
  VersionTuple SPIRVVersion = ST.getSPIRVVersion();
  bool MinVerOK = SPIRVVersion.empty() || SPIRVVersion >= ReqMinVer;
  bool MaxVerOK =
      ReqMaxVer.empty() || SPIRVVersion.empty() || SPIRVVersion <= ReqMaxVer;
  CapabilityList ReqCaps = getSymbolicOperandCapabilities(Category, i);
  ExtensionList ReqExts = getSymbolicOperandExtensions(Category, i);
  if (ReqCaps.empty()) {
    if (ReqExts.empty()) {
      if (MinVerOK && MaxVerOK)
        return {true, {}, {}, ReqMinVer, ReqMaxVer};
      return {false, {}, {}, VersionTuple(), VersionTuple()};
    }
  } else if (MinVerOK && MaxVerOK) {
    if (ReqCaps.size() == 1) {
      auto Cap = ReqCaps[0];
      if (Reqs.isCapabilityAvailable(Cap))
        return {true, {Cap}, ReqExts, ReqMinVer, ReqMaxVer};
    } else {
      // Per the SPIR-V specification, when a feature lists several enabling
      // capabilities only one of them needs to be declared. One capability may
      // still be preferred over another, so the AvoidCapabilities set (driven
      // by the command line) is used to skip certain capabilities whenever an
      // alternative is available.
      CapabilityList UseCaps;
      for (auto Cap : ReqCaps)
        if (Reqs.isCapabilityAvailable(Cap))
          UseCaps.push_back(Cap);
      for (size_t i = 0, Sz = UseCaps.size(); i < Sz; ++i) {
        auto Cap = UseCaps[i];
        if (i == Sz - 1 || !AvoidCaps.S.contains(Cap))
          return {true, {Cap}, ReqExts, ReqMinVer, ReqMaxVer};
      }
    }
  }

  // If there are no usable capabilities, or the version requirements cannot be
  // met, fall back to the extension list if the subtarget supports them all.
  if (llvm::all_of(ReqExts, [&ST](const SPIRV::Extension::Extension &Ext) {
        return ST.canUseExtension(Ext);
      })) {
    return {true,
            {},
            ReqExts,
            VersionTuple(),
            VersionTuple()};
  }
  return {false, {}, {}, VersionTuple(), VersionTuple()};
}

130
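// Determine the addressing model, memory model, source language and the
// baseline requirements for the module, either from named metadata
// ("spirv.MemoryModel", "opencl.ocl.version", ...) or from the subtarget's
// environment defaults.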
131void SPIRVModuleAnalysis::setBaseInfo(const Module &M) {
142
143
144 if (auto MemModel = M.getNamedMetadata("spirv.MemoryModel")) {
    auto MemMD = MemModel->getOperand(0);
    MAI.Addr = static_cast<SPIRV::AddressingModel::AddressingModel>(
        getMetadataUInt(MemMD, 0));
    MAI.Mem =
        static_cast<SPIRV::MemoryModel::MemoryModel>(getMetadataUInt(MemMD, 1));
150 } else {
151
152 MAI.Mem = ST->isOpenCLEnv() ? SPIRV::MemoryModel::OpenCL
153 : SPIRV::MemoryModel::GLSL450;
154 if (MAI.Mem == SPIRV::MemoryModel::OpenCL) {
155 unsigned PtrSize = ST->getPointerSize();
156 MAI.Addr = PtrSize == 32 ? SPIRV::AddressingModel::Physical32
157 : PtrSize == 64 ? SPIRV::AddressingModel::Physical64
158 : SPIRV::AddressingModel::Logical;
159 } else {
160
161 MAI.Addr = SPIRV::AddressingModel::Logical;
162 }
163 }
164
165
166 if (auto VerNode = M.getNamedMetadata("opencl.ocl.version")) {
167 MAI.SrcLang = SPIRV::SourceLanguage::OpenCL_C;
168
169
170 assert(VerNode->getNumOperands() > 0 && "Invalid SPIR");
171 auto VersionMD = VerNode->getOperand(0);
172 unsigned MajorNum = getMetadataUInt(VersionMD, 0, 2);
173 unsigned MinorNum = getMetadataUInt(VersionMD, 1);
174 unsigned RevNum = getMetadataUInt(VersionMD, 2);
    // Prevent the major part of the OpenCL version from being 0.
    MAI.SrcLangVersion =
        (std::max(1U, MajorNum) * 100 + MinorNum) * 1000 + RevNum;
178 } else {
179
180
181
182
183 if (ST->isOpenCLEnv()) {
184 MAI.SrcLang = SPIRV::SourceLanguage::OpenCL_CPP;
186 } else {
187 MAI.SrcLang = SPIRV::SourceLanguage::Unknown;
189 }
190 }
191
192 if (auto ExtNode = M.getNamedMetadata("opencl.used.extensions")) {
193 for (unsigned I = 0, E = ExtNode->getNumOperands(); I != E; ++I) {
196 continue;
199 }
200 }
201

  // Update required capabilities for this memory model, addressing model and
  // source language.
  MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::MemoryModelOperand,
                                 MAI.Mem, *ST);
  MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::SourceLanguageOperand,
                                 MAI.SrcLang, *ST);
  MAI.Reqs.getAndAddRequirements(
      SPIRV::OperandCategory::AddressingModelOperand, MAI.Addr, *ST);

  if (ST->isOpenCLEnv()) {
    // Pre-register the OpenCL extended instruction set import.
    MAI.ExtInstSetMap[static_cast<unsigned>(
        SPIRV::InstructionSet::OpenCL_std)] =
        Register::index2VirtReg(MAI.getNextID());
  }
217}
218
219
220
221
222
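// Produce a signature for the instruction: a vector of operand hash values
// (ignoring defs unless UseDefReg is set) that lets identical instructions
// from different functions be deduplicated at module scope.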
static InstrSignature instrToSignature(const MachineInstr &MI,
                                       SPIRV::ModuleAnalysisInfo &MAI,
                                       bool UseDefReg) {
  InstrSignature Signature{MI.getOpcode()};
  for (unsigned i = 0; i < MI.getNumOperands(); ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    size_t h;
    if (MO.isReg()) {
      if (!UseDefReg && MO.isDef())
        continue;
      Register RegAlias = MAI.getRegisterAlias(MI.getMF(), MO.getReg());
      if (!RegAlias.isValid()) {
        LLVM_DEBUG({
          dbgs() << "Unexpectedly, no global id found for the operand ";
          MO.print(dbgs());
          dbgs() << "\nInstruction: ";
          MI.print(dbgs());
          dbgs() << "\n";
        });
        report_fatal_error("All v-regs must have been mapped to global id's");
      }
      // Mimic llvm::hash_value(const MachineOperand &MO).
      h = hash_combine(MO.getType(), (unsigned)RegAlias, MO.getSubReg(),
                       MO.isDef());
    } else {
      h = hash_value(MO);
    }
    Signature.push_back(h);
  }
  return Signature;
}
254
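// Return true if the instruction belongs to the module-level declaration
// sections (types, constants, global variables, function headers).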
bool SPIRVModuleAnalysis::isDeclSection(const MachineRegisterInfo &MRI,
                                        const MachineInstr &MI) {
  unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  case SPIRV::OpTypeForwardPointer:
    // Omit now, collect later.
    return false;
  case SPIRV::OpVariable:
    return static_cast<SPIRV::StorageClass::StorageClass>(
               MI.getOperand(2).getImm()) != SPIRV::StorageClass::Function;
  case SPIRV::OpFunction:
  case SPIRV::OpFunctionParameter:
    return true;
  }
  if (GR->hasConstFunPtr() && Opcode == SPIRV::OpUndef) {
    Register DefReg = MI.getOperand(0).getReg();
    for (MachineInstr &UseMI : MRI.use_instructions(DefReg)) {
      if (UseMI.getOpcode() != SPIRV::OpConstantFunctionPointerINTEL)
        continue;
      // This is a dummy definition: the function-pointer constant refers to a
      // function that is resolved elsewhere, so skip this OpUndef.
      return false;
    }
  }
  return TII->isTypeDeclInstr(MI) || TII->isConstantInstr(MI) ||
         TII->isInlineAsmDefInstr(MI);
}
285
286
287
288
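// A function-pointer constant refers to an OpFunction in another machine
// function; number that definition (and its parameters) first so the use can
// alias the same global register.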
void SPIRVModuleAnalysis::visitFunPtrUse(
    Register OpReg, InstrGRegsMap &SignatureToGReg,
    std::map<const Value *, unsigned> &GlobalToGReg, const MachineFunction *MF,
    const MachineInstr &MI) {
  const MachineOperand *OpFunDef =
      GR->getFunctionDefinitionByUse(&MI.getOperand(2));
  assert(OpFunDef && "Unresolved function pointer use");
  // Find the actual function definition and number it globally in advance.
  const MachineInstr *OpDefMI = OpFunDef->getParent();
  assert(OpDefMI && OpDefMI->getOpcode() == SPIRV::OpFunction);
  const MachineFunction *FunDefMF = OpDefMI->getParent()->getParent();
  const MachineRegisterInfo &FunDefMRI = FunDefMF->getRegInfo();
  do {
    visitDecl(FunDefMRI, SignatureToGReg, GlobalToGReg, FunDefMF, *OpDefMI);
    OpDefMI = OpDefMI->getNextNode();
  } while (OpDefMI && (OpDefMI->getOpcode() == SPIRV::OpFunction ||
                       OpDefMI->getOpcode() == SPIRV::OpFunctionParameter));
  // Associate the use with the function definition's global register.
  Register GlobalFunDefReg = MAI.getRegisterAlias(FunDefMF, OpFunDef->getReg());
  assert(GlobalFunDefReg.isValid() &&
         "Function definition must refer to a global register");
  MAI.setRegisterAlias(MF, OpReg, GlobalFunDefReg);
}
312
313
314
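// Depth-first traversal of a declaration's operands: dependencies are visited
// and globally numbered before the instruction itself is assigned its global
// register.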
void SPIRVModuleAnalysis::visitDecl(
    const MachineRegisterInfo &MRI, InstrGRegsMap &SignatureToGReg,
    std::map<const Value *, unsigned> &GlobalToGReg, const MachineFunction *MF,
    const MachineInstr &MI) {
  unsigned Opcode = MI.getOpcode();

  // Walk the instruction's uses and make sure every dependency has a global
  // register assigned before processing the instruction itself.
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || MO.isDef())
      continue;
    Register OpReg = MO.getReg();

328 if (Opcode == SPIRV::OpConstantFunctionPointerINTEL &&
329 MRI.getRegClass(OpReg) == &SPIRV::pIDRegClass) {
330 visitFunPtrUse(OpReg, SignatureToGReg, GlobalToGReg, MF, MI);
331 continue;
332 }
333
    if (MAI.hasRegisterAlias(MF, MO.getReg()))
      continue;
336
337 if (const MachineInstr *OpDefMI = MRI.getUniqueVRegDef(OpReg)) {
338 if (isDeclSection(MRI, *OpDefMI))
339 visitDecl(MRI, SignatureToGReg, GlobalToGReg, MF, *OpDefMI);
340 continue;
341 }
342
    // All dependencies must have a unique definition by now.
    LLVM_DEBUG({
      dbgs() << "Unexpectedly, no unique definition for the operand ";
      MO.print(dbgs());
      dbgs() << "\nInstruction: ";
      MI.print(dbgs());
      dbgs() << "\n";
    });
    report_fatal_error(
        "No unique definition is found for the virtual register");
  }
354
  Register GReg;
  bool IsFunDef = false;
  if (TII->isSpecConstantInstr(MI)) {
    GReg = Register::index2VirtReg(MAI.getNextID());
    MAI.MS[SPIRV::MB_TypeConstVars].push_back(&MI);
  } else if (Opcode == SPIRV::OpFunction ||
             Opcode == SPIRV::OpFunctionParameter) {
    GReg = handleFunctionOrParameter(MF, MI, GlobalToGReg, IsFunDef);
  } else if (TII->isTypeDeclInstr(MI) || TII->isConstantInstr(MI) ||
             TII->isInlineAsmDefInstr(MI)) {
    GReg = handleTypeDeclOrConstant(MI, SignatureToGReg);
  } else if (Opcode == SPIRV::OpVariable) {
    GReg = handleVariable(MF, MI, GlobalToGReg);
  } else {
    LLVM_DEBUG({
      dbgs() << "\nInstruction: ";
      MI.print(dbgs());
      dbgs() << "\n";
    });
    llvm_unreachable("Unexpected instruction is visited");
  }
  MAI.setRegisterAlias(MF, MI.getOperand(0).getReg(), GReg);
  if (!IsFunDef)
    MAI.setSkipEmission(&MI);
}
380
Register SPIRVModuleAnalysis::handleFunctionOrParameter(
    const MachineFunction *MF, const MachineInstr &MI,
    std::map<const Value *, unsigned> &GlobalToGReg, bool &IsFunDef) {
  const Value *GObj = GR->getGlobalObject(MF, MI.getOperand(0).getReg());
  assert(GObj && "Unregistered global definition");
  const Function *F = dyn_cast<Function>(GObj);
  if (!F)
    F = dyn_cast<Argument>(GObj)->getParent();
  assert(F && "Expected a reference to a function or an argument");
  IsFunDef = !F->isDeclaration();
  auto It = GlobalToGReg.find(GObj);
  if (It != GlobalToGReg.end())
    return It->second;
  Register GReg = Register::index2VirtReg(MAI.getNextID());
  GlobalToGReg[GObj] = GReg;
  if (!IsFunDef)
    MAI.MS[SPIRV::MB_ExtFuncDecls].push_back(&MI);
  return GReg;
}
400
Register
SPIRVModuleAnalysis::handleTypeDeclOrConstant(const MachineInstr &MI,
                                              InstrGRegsMap &SignatureToGReg) {
  InstrSignature MISign = instrToSignature(MI, MAI, false);
  auto It = SignatureToGReg.find(MISign);
  if (It != SignatureToGReg.end())
    return It->second;
  Register GReg = Register::index2VirtReg(MAI.getNextID());
  SignatureToGReg[MISign] = GReg;
  MAI.MS[SPIRV::MB_TypeConstVars].push_back(&MI);
  return GReg;
}
413
Register SPIRVModuleAnalysis::handleVariable(
    const MachineFunction *MF, const MachineInstr &MI,
    std::map<const Value *, unsigned> &GlobalToGReg) {
  MAI.GlobalVarList.push_back(&MI);
  const Value *GObj = GR->getGlobalObject(MF, MI.getOperand(0).getReg());
  assert(GObj && "Unregistered global definition");
  auto It = GlobalToGReg.find(GObj);
  if (It != GlobalToGReg.end())
    return It->second;
  Register GReg = Register::index2VirtReg(MAI.getNextID());
  GlobalToGReg[GObj] = GReg;
  MAI.MS[SPIRV::MB_TypeConstVars].push_back(&MI);
  return GReg;
}
428
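// Walk every machine function and collect module-level declarations
// (capabilities, extensions, types, constants, global variables, function
// headers), numbering them with global registers.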
void SPIRVModuleAnalysis::collectDeclarations(const Module &M) {
  InstrGRegsMap SignatureToGReg;
  std::map<const Value *, unsigned> GlobalToGReg;
  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    MachineFunction *MF = MMI->getMachineFunction(*F);
    if (!MF)
      continue;
    const MachineRegisterInfo &MRI = MF->getRegInfo();
    unsigned PastHeader = 0;
    for (MachineBasicBlock &MBB : *MF) {
      for (MachineInstr &MI : MBB) {
        if (MI.getNumOperands() == 0)
          continue;
        unsigned Opcode = MI.getOpcode();
        if (Opcode == SPIRV::OpFunction) {
          if (PastHeader == 0) {
            PastHeader = 1;
            continue;
          }
        } else if (Opcode == SPIRV::OpFunctionParameter) {
          if (PastHeader < 2)
            continue;
        } else if (PastHeader > 0) {
          PastHeader = 2;
        }

        const MachineOperand &DefMO = MI.getOperand(0);
        switch (Opcode) {
        case SPIRV::OpExtension:
          MAI.Reqs.addExtension(
              SPIRV::Extension::Extension(MI.getOperand(0).getImm()));
          MAI.setSkipEmission(&MI);
          break;
        case SPIRV::OpCapability:
          MAI.Reqs.addCapability(
              SPIRV::Capability::Capability(MI.getOperand(0).getImm()));
          MAI.setSkipEmission(&MI);
          if (PastHeader > 0)
            PastHeader = 2;
          break;
        default:
          if (DefMO.isReg() && isDeclSection(MRI, MI) &&
              !MAI.hasRegisterAlias(MF, DefMO.getReg()))
            visitDecl(MRI, SignatureToGReg, GlobalToGReg, MF, MI);
        }
      }
    }
  }
}
476
477
478
479
480
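// Record the global ID register of each function: imported functions are keyed
// by the Function resolved from their linkage name, local ones by their
// OpFunction result.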
void SPIRVModuleAnalysis::collectFuncNames(MachineInstr &MI,
                                           const Function *F) {
  if (MI.getOpcode() == SPIRV::OpDecorate) {
    // If it's got Import linkage.
    auto Dec = MI.getOperand(1).getImm();
    if (Dec == static_cast<unsigned>(SPIRV::Decoration::LinkageAttributes)) {
      auto Lnk = MI.getOperand(MI.getNumOperands() - 1).getImm();
      if (Lnk == static_cast<unsigned>(SPIRV::LinkageType::Import)) {
        // Map the imported function name to its function ID register.
        const Function *ImportedFunc =
            F->getParent()->getFunction(getStringImm(MI, 2));
        Register Target = MI.getOperand(0).getReg();
        MAI.FuncMap[ImportedFunc] = MAI.getRegisterAlias(MI.getMF(), Target);
      }
    }
  } else if (MI.getOpcode() == SPIRV::OpFunction) {
    // Record all internal OpFunction declarations.
    Register Reg = MI.defs().begin()->getReg();
    Register GlobalReg = MAI.getRegisterAlias(MI.getMF(), Reg);
    assert(GlobalReg.isValid());
    MAI.FuncMap[F] = GlobalReg;
  }
}
504
505
506
507
// Collect the given instruction into the specified module section, skipping
// duplicates by comparing instruction signatures built from global registers.
static void collectOtherInstr(MachineInstr &MI, SPIRV::ModuleAnalysisInfo &MAI,
                              SPIRV::ModuleSectionType MSType, InstrTraces &IS,
                              bool Append = true) {
  MAI.setSkipEmission(&MI);
  InstrSignature MISign = instrToSignature(MI, MAI, true);
  auto FoundMI = IS.insert(MISign);
  if (!FoundMI.second)
    return; // Insert failed, so we found a duplicate; don't add it.
  // No duplicates, so add it.
  if (Append)
    MAI.MS[MSType].push_back(&MI);
  else
    MAI.MS[MSType].insert(MAI.MS[MSType].begin(), &MI);
}
522
523
524
void SPIRVModuleAnalysis::processOtherInstrs(const Module &M) {
  InstrTraces IS;
  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    if ((*F).isDeclaration())
      continue;
    MachineFunction *MF = MMI->getMachineFunction(*F);
    assert(MF);

    for (MachineBasicBlock &MBB : *MF)
      for (MachineInstr &MI : MBB) {
        if (MAI.getSkipEmission(&MI))
          continue;
        const unsigned OpCode = MI.getOpcode();
        if (OpCode == SPIRV::OpString) {
          collectOtherInstr(MI, MAI, SPIRV::MB_DebugStrings, IS);
        } else if (OpCode == SPIRV::OpExtInst && MI.getOperand(2).isImm() &&
                   MI.getOperand(2).getImm() ==
                       SPIRV::InstructionSet::
                           NonSemantic_Shader_DebugInfo_100) {
          MachineOperand Ins = MI.getOperand(3);
          namespace NS = SPIRV::NonSemanticExtInst;
          static constexpr int64_t GlobalNonSemanticDITy[] = {
              NS::DebugSource, NS::DebugCompilationUnit, NS::DebugInfoNone,
              NS::DebugTypeBasic, NS::DebugTypePointer};
          bool IsGlobalDI = false;
          for (unsigned Idx = 0; Idx < std::size(GlobalNonSemanticDITy); ++Idx)
            IsGlobalDI |= Ins.getImm() == GlobalNonSemanticDITy[Idx];
          if (IsGlobalDI)
            collectOtherInstr(MI, MAI, SPIRV::MB_NonSemanticGlobalDI, IS);
        } else if (OpCode == SPIRV::OpName || OpCode == SPIRV::OpMemberName) {
          collectOtherInstr(MI, MAI, SPIRV::MB_DebugNames, IS);
        } else if (OpCode == SPIRV::OpEntryPoint) {
          collectOtherInstr(MI, MAI, SPIRV::MB_EntryPoints, IS);
        } else if (TII->isDecorationInstr(MI)) {
          collectOtherInstr(MI, MAI, SPIRV::MB_Annotations, IS);
          collectFuncNames(MI, &*F);
        } else if (TII->isConstantInstr(MI)) {
          // Now OpSpecConstant*s are not in DT, but they need to be collected
          // anyway.
          collectOtherInstr(MI, MAI, SPIRV::MB_TypeConstVars, IS);
        } else if (OpCode == SPIRV::OpFunction) {
          collectFuncNames(MI, &*F);
        } else if (OpCode == SPIRV::OpTypeForwardPointer) {
          collectOtherInstr(MI, MAI, SPIRV::MB_TypeConstVars, IS, false);
        }
      }
  }
}
573
574
575
576
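// Assign a global number to every virtual register that has not been numbered
// yet, and allocate an ID register for each extended instruction set in use.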
void SPIRVModuleAnalysis::numberRegistersGlobally(const Module &M) {
  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    if ((*F).isDeclaration())
      continue;
    MachineFunction *MF = MMI->getMachineFunction(*F);
    assert(MF);
    for (MachineBasicBlock &MBB : *MF) {
      for (MachineInstr &MI : MBB) {
        for (MachineOperand &Op : MI.operands()) {
          if (!Op.isReg())
            continue;
          Register Reg = Op.getReg();
          if (MAI.hasRegisterAlias(MF, Reg))
            continue;
          Register NewReg = Register::index2VirtReg(MAI.getNextID());
          MAI.setRegisterAlias(MF, Reg, NewReg);
        }
        if (MI.getOpcode() != SPIRV::OpExtInst)
          continue;
        auto Set = MI.getOperand(2).getImm();
        if (!MAI.ExtInstSetMap.contains(Set))
          MAI.ExtInstSetMap[Set] = Register::index2VirtReg(MAI.getNextID());
      }
    }
  }
}
603
604
void SPIRV::RequirementHandler::getAndAddRequirements(
    SPIRV::OperandCategory::OperandCategory Category, uint32_t i,
    const SPIRVSubtarget &ST) {
  addRequirements(getSymbolicOperandRequirements(Category, i, ST, *this));
}
610
void SPIRV::RequirementHandler::recursiveAddCapabilities(
    const CapabilityList &ToPrune) {
  for (const auto &Cap : ToPrune) {
    AllCaps.insert(Cap);
    CapabilityList ImplicitDecls =
        getSymbolicOperandCapabilities(OperandCategory::CapabilityOperand, Cap);
    recursiveAddCapabilities(ImplicitDecls);
  }
}
620
void SPIRV::RequirementHandler::addCapabilities(const CapabilityList &ToAdd) {
  for (const auto &Cap : ToAdd) {
    bool IsNewlyInserted = AllCaps.insert(Cap).second;
    if (!IsNewlyInserted) // Don't re-add if it's already been declared.
      continue;
    CapabilityList ImplicitDecls =
        getSymbolicOperandCapabilities(OperandCategory::CapabilityOperand, Cap);
    recursiveAddCapabilities(ImplicitDecls);
    MinimalCaps.push_back(Cap);
  }
}
632
636 report_fatal_error("Adding SPIR-V requirements this target can't satisfy.");
637
638 if (Req.Cap.has_value())
639 addCapabilities({Req.Cap.value()});
640
641 addExtensions(Req.Exts);
642
644 if (!MaxVersion.empty() && Req.MinVer > MaxVersion) {
646 << " and <= " << MaxVersion << "\n");
647 report_fatal_error("Adding SPIR-V requirements that can't be satisfied.");
648 }
649
650 if (MinVersion.empty() || Req.MinVer > MinVersion)
651 MinVersion = Req.MinVer;
652 }
653
655 if (!MinVersion.empty() && Req.MaxVer < MinVersion) {
657 << " and >= " << MinVersion << "\n");
658 report_fatal_error("Adding SPIR-V requirements that can't be satisfied.");
659 }
660
661 if (MaxVersion.empty() || Req.MaxVer < MaxVersion)
662 MaxVersion = Req.MaxVer;
663 }
664}

void SPIRV::RequirementHandler::checkSatisfiable(
    const SPIRVSubtarget &ST) const {
  // Report as many errors as possible before aborting the compilation.
  bool IsSatisfiable = true;
  auto TargetVer = ST.getSPIRVVersion();

  if (!MaxVersion.empty() && !TargetVer.empty() && MaxVersion < TargetVer) {
    LLVM_DEBUG(
        dbgs() << "Target SPIR-V version too high for required features\n"
               << "Required max version: " << MaxVersion << " target version "
               << TargetVer << "\n");
677 IsSatisfiable = false;
678 }
679
680 if (!MinVersion.empty() && !TargetVer.empty() && MinVersion > TargetVer) {
681 LLVM_DEBUG(dbgs() << "Target SPIR-V version too low for required features\n"
682 << "Required min version: " << MinVersion
683 << " target version " << TargetVer << "\n");
684 IsSatisfiable = false;
685 }
686
  if (!MinVersion.empty() && !MaxVersion.empty() && MinVersion > MaxVersion) {
    LLVM_DEBUG(
        dbgs()
        << "Version is too low for some features and too high for others.\n"
        << "Required SPIR-V min version: " << MinVersion
        << " required SPIR-V max version " << MaxVersion << "\n");
    IsSatisfiable = false;
  }

  for (auto Cap : MinimalCaps) {
    if (AvailableCaps.contains(Cap))
      continue;
    LLVM_DEBUG(dbgs() << "Capability not supported: "
                      << getSymbolicOperandMnemonic(
                             OperandCategory::CapabilityOperand, Cap)
                      << "\n");
    IsSatisfiable = false;
  }

  for (auto Ext : AllExtensions) {
    if (ST.canUseExtension(Ext))
      continue;
    LLVM_DEBUG(dbgs() << "Extension not supported: "
                      << getSymbolicOperandMnemonic(
                             OperandCategory::ExtensionOperand, Ext)
                      << "\n");
    IsSatisfiable = false;
  }
715
716 if (!IsSatisfiable)
717 report_fatal_error("Unable to meet SPIR-V requirements for this target.");
718}
719
720
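// Add the given capabilities to the available set, together with every
// capability they transitively imply.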
void SPIRV::RequirementHandler::addAvailableCaps(const CapabilityList &ToAdd) {
  for (const auto Cap : ToAdd)
    if (AvailableCaps.insert(Cap).second)
      addAvailableCaps(getSymbolicOperandCapabilities(
          SPIRV::OperandCategory::CapabilityOperand, Cap));
}
727
void SPIRV::RequirementHandler::removeCapabilityIf(
    const Capability::Capability ToRemove,
    const Capability::Capability IfPresent) {
  if (AllCaps.contains(IfPresent))
    AllCaps.erase(ToRemove);
}
734
namespace llvm {
namespace SPIRV {
void RequirementHandler::initAvailableCapabilities(const SPIRVSubtarget &ST) {
  // Capabilities available in every supported SPIR-V version.
  addAvailableCaps({Capability::Shader, Capability::Linkage, Capability::Int8,
                    Capability::Int16});

  if (ST.isAtLeastSPIRVVer(VersionTuple(1, 3)))
    addAvailableCaps({Capability::GroupNonUniform,
                      Capability::GroupNonUniformVote,
                      Capability::GroupNonUniformArithmetic,
                      Capability::GroupNonUniformBallot,
                      Capability::GroupNonUniformClustered,
                      Capability::GroupNonUniformShuffle,
                      Capability::GroupNonUniformShuffleRelative});

  if (ST.isAtLeastSPIRVVer(VersionTuple(1, 6)))
    addAvailableCaps({Capability::DotProduct, Capability::DotProductInputAll,
                      Capability::DotProductInput4x8Bit,
                      Capability::DotProductInput4x8BitPacked,
                      Capability::DemoteToHelperInvocation});

  // Add capabilities enabled by extensions.
  for (auto Extension : ST.getAllAvailableExtensions()) {
    CapabilityList EnabledCapabilities =
        getCapabilitiesEnabledByExtension(Extension);
    addAvailableCaps(EnabledCapabilities);
  }
763
764 if (ST.isOpenCLEnv()) {
765 initAvailableCapabilitiesForOpenCL(ST);
766 return;
767 }
768
769 if (ST.isVulkanEnv()) {
770 initAvailableCapabilitiesForVulkan(ST);
771 return;
772 }
773
774 report_fatal_error("Unimplemented environment for SPIR-V generation.");
775}
776
void RequirementHandler::initAvailableCapabilitiesForOpenCL(
    const SPIRVSubtarget &ST) {
  // Add the min requirements for different OpenCL and SPIR-V versions.
  addAvailableCaps({Capability::Addresses, Capability::Float16Buffer,
                    Capability::Kernel, Capability::Vector16,
                    Capability::Groups, Capability::GenericPointer,
                    Capability::StorageImageWriteWithoutFormat,
                    Capability::StorageImageReadWithoutFormat});
  if (ST.hasOpenCLFullProfile())
    addAvailableCaps({Capability::Int64, Capability::Int64Atomics});
  if (ST.hasOpenCLImageSupport()) {
    addAvailableCaps({Capability::ImageBasic, Capability::LiteralSampler,
                      Capability::Image1D, Capability::SampledBuffer,
                      Capability::ImageBuffer});
    if (ST.isAtLeastOpenCLVer(VersionTuple(2, 0)))
      addAvailableCaps({Capability::ImageReadWrite});
  }
  if (ST.isAtLeastSPIRVVer(VersionTuple(1, 1)) &&
      ST.isAtLeastOpenCLVer(VersionTuple(2, 2)))
    addAvailableCaps({Capability::SubgroupDispatch, Capability::PipeStorage});
  if (ST.isAtLeastSPIRVVer(VersionTuple(1, 4)))
    addAvailableCaps({Capability::DenormPreserve, Capability::DenormFlushToZero,
                      Capability::SignedZeroInfNanPreserve,
                      Capability::RoundingModeRTE,
                      Capability::RoundingModeRTZ});
  // TODO: verify if this needs some checks.
  addAvailableCaps({Capability::Float16, Capability::Float64});

  // TODO: add OpenCL extensions.
}
807
void RequirementHandler::initAvailableCapabilitiesForVulkan(
    const SPIRVSubtarget &ST) {

  // Core for the Vulkan environment.
  addAvailableCaps({Capability::Int64, Capability::Float16, Capability::Float64,
                    Capability::GroupNonUniform, Capability::Image1D,
                    Capability::SampledBuffer, Capability::ImageBuffer,
                    Capability::UniformBufferArrayDynamicIndexing,
                    Capability::SampledImageArrayDynamicIndexing,
                    Capability::StorageBufferArrayDynamicIndexing,
                    Capability::StorageImageArrayDynamicIndexing});

  // Became core in Vulkan 1.2 / SPIR-V 1.5.
  if (ST.isAtLeastSPIRVVer(VersionTuple(1, 5))) {
    addAvailableCaps(
        {Capability::ShaderNonUniformEXT, Capability::RuntimeDescriptorArrayEXT,
         Capability::InputAttachmentArrayDynamicIndexingEXT,
         Capability::UniformTexelBufferArrayDynamicIndexingEXT,
         Capability::StorageTexelBufferArrayDynamicIndexingEXT,
         Capability::UniformBufferArrayNonUniformIndexingEXT,
         Capability::SampledImageArrayNonUniformIndexingEXT,
         Capability::StorageBufferArrayNonUniformIndexingEXT,
         Capability::StorageImageArrayNonUniformIndexingEXT,
         Capability::InputAttachmentArrayNonUniformIndexingEXT,
         Capability::UniformTexelBufferArrayNonUniformIndexingEXT,
         Capability::StorageTexelBufferArrayNonUniformIndexingEXT});
  }

  // Became core in Vulkan 1.3 / SPIR-V 1.6.
  if (ST.isAtLeastSPIRVVer(VersionTuple(1, 6)))
    addAvailableCaps({Capability::StorageImageWriteWithoutFormat,
                      Capability::StorageImageReadWithoutFormat});
}
841
} // namespace SPIRV
} // namespace llvm
844
845
846
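// Add requirements implied by a decoration operand: the decoration itself,
// any BuiltIn it names, and the extensions tied to specific decorations.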
static void addOpDecorateReqs(const MachineInstr &MI, unsigned DecIndex,
                              SPIRV::RequirementHandler &Reqs,
                              const SPIRVSubtarget &ST) {
  int64_t DecOp = MI.getOperand(DecIndex).getImm();
  auto Dec = static_cast<SPIRV::Decoration::Decoration>(DecOp);
  Reqs.addRequirements(getSymbolicOperandRequirements(
      SPIRV::OperandCategory::DecorationOperand, Dec, ST, Reqs));
854
  if (Dec == SPIRV::Decoration::BuiltIn) {
    int64_t BuiltInOp = MI.getOperand(DecIndex + 1).getImm();
    auto BuiltIn = static_cast<SPIRV::BuiltIn::BuiltIn>(BuiltInOp);
    Reqs.addRequirements(getSymbolicOperandRequirements(
        SPIRV::OperandCategory::BuiltInOperand, BuiltIn, ST, Reqs));
860 } else if (Dec == SPIRV::Decoration::LinkageAttributes) {
861 int64_t LinkageOp = MI.getOperand(MI.getNumOperands() - 1).getImm();
    SPIRV::LinkageType::LinkageType LnkType =
        static_cast<SPIRV::LinkageType::LinkageType>(LinkageOp);
864 if (LnkType == SPIRV::LinkageType::LinkOnceODR)
865 Reqs.addExtension(SPIRV::Extension::SPV_KHR_linkonce_odr);
866 } else if (Dec == SPIRV::Decoration::CacheControlLoadINTEL ||
867 Dec == SPIRV::Decoration::CacheControlStoreINTEL) {
868 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_cache_controls);
869 } else if (Dec == SPIRV::Decoration::HostAccessINTEL) {
870 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_global_variable_host_access);
  } else if (Dec == SPIRV::Decoration::InitModeINTEL ||
             Dec == SPIRV::Decoration::ImplementInRegisterMapINTEL) {
    Reqs.addExtension(
        SPIRV::Extension::SPV_INTEL_global_variable_fpga_decorations);
875 } else if (Dec == SPIRV::Decoration::NonUniformEXT) {
876 Reqs.addRequirements(SPIRV::Capability::ShaderNonUniformEXT);
877 }
878}
879
880
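// Add requirements for an OpTypeImage: the image format plus the capabilities
// implied by its dimensionality, arraying, multisampling and sampling mode.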
static void addOpTypeImageReqs(const MachineInstr &MI,
                               SPIRV::RequirementHandler &Reqs,
                               const SPIRVSubtarget &ST) {
  assert(MI.getNumOperands() >= 8 && "Insufficient operands for OpTypeImage");
  // The operand indices used below follow the OpTypeImage layout.
  int64_t ImgFormatOp = MI.getOperand(7).getImm();
  auto ImgFormat = static_cast<SPIRV::ImageFormat::ImageFormat>(ImgFormatOp);
  Reqs.getAndAddRequirements(SPIRV::OperandCategory::ImageFormatOperand,
                             ImgFormat, ST);
891
892 bool IsArrayed = MI.getOperand(4).getImm() == 1;
893 bool IsMultisampled = MI.getOperand(5).getImm() == 1;
894 bool NoSampler = MI.getOperand(6).getImm() == 2;
895
896 assert(MI.getOperand(2).isImm());
897 switch (MI.getOperand(2).getImm()) {
898 case SPIRV::Dim::DIM_1D:
899 Reqs.addRequirements(NoSampler ? SPIRV::Capability::Image1D
900 : SPIRV::Capability::Sampled1D);
901 break;
  case SPIRV::Dim::DIM_2D:
    if (IsMultisampled && NoSampler)
      Reqs.addRequirements(SPIRV::Capability::ImageMSArray);
    break;
  case SPIRV::Dim::DIM_Cube:
    Reqs.addRequirements(SPIRV::Capability::Shader);
    if (IsArrayed)
909 Reqs.addRequirements(NoSampler ? SPIRV::Capability::ImageCubeArray
910 : SPIRV::Capability::SampledCubeArray);
911 break;
912 case SPIRV::Dim::DIM_Rect:
913 Reqs.addRequirements(NoSampler ? SPIRV::Capability::ImageRect
914 : SPIRV::Capability::SampledRect);
915 break;
916 case SPIRV::Dim::DIM_Buffer:
917 Reqs.addRequirements(NoSampler ? SPIRV::Capability::ImageBuffer
918 : SPIRV::Capability::SampledBuffer);
919 break;
920 case SPIRV::Dim::DIM_SubpassData:
921 Reqs.addRequirements(SPIRV::Capability::InputAttachment);
922 break;
923 }
924

  // Has optional access qualifier.
  if (ST.isOpenCLEnv()) {
    if (MI.getNumOperands() > 8 &&
        MI.getOperand(8).getImm() == SPIRV::AccessQualifier::ReadWrite)
      Reqs.addRequirements(SPIRV::Capability::ImageReadWrite);
    else
      Reqs.addRequirements(SPIRV::Capability::ImageBasic);
  }
933}
934
935
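// Atomic float instructions are only legal with the corresponding
// SPV_EXT_shader_atomic_float* extensions and width-specific capabilities.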
936#define ATOM_FLT_REQ_EXT_MSG(ExtName) \
937 "The atomic float instruction requires the following SPIR-V " \
938 "extension: SPV_EXT_shader_atomic_float" ExtName
static void AddAtomicFloatRequirements(const MachineInstr &MI,
                                       SPIRV::RequirementHandler &Reqs,
                                       const SPIRVSubtarget &ST) {
  assert(MI.getOperand(1).isReg() &&
         "Expect register operand in atomic float instruction");
944 Register TypeReg = MI.getOperand(1).getReg();
945 SPIRVType *TypeDef = MI.getMF()->getRegInfo().getVRegDef(TypeReg);
946 if (TypeDef->getOpcode() != SPIRV::OpTypeFloat)
947 report_fatal_error("Result type of an atomic float instruction must be a "
948 "floating-point type scalar");

  unsigned BitWidth = TypeDef->getOperand(1).getImm();
  unsigned Op = MI.getOpcode();
  if (Op == SPIRV::OpAtomicFAddEXT) {
    if (!ST.canUseExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_add))
      report_fatal_error(ATOM_FLT_REQ_EXT_MSG("_add"), false);
    Reqs.addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_add);
    switch (BitWidth) {
    case 16:
      if (!ST.canUseExtension(
              SPIRV::Extension::SPV_EXT_shader_atomic_float16_add))
        report_fatal_error(ATOM_FLT_REQ_EXT_MSG("16_add"), false);
      Reqs.addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float16_add);
962 Reqs.addCapability(SPIRV::Capability::AtomicFloat16AddEXT);
963 break;
964 case 32:
965 Reqs.addCapability(SPIRV::Capability::AtomicFloat32AddEXT);
966 break;
967 case 64:
968 Reqs.addCapability(SPIRV::Capability::AtomicFloat64AddEXT);
969 break;
    default:
      report_fatal_error(
          "Unexpected floating-point type width in atomic float instruction");
    }
  } else {
    if (!ST.canUseExtension(
            SPIRV::Extension::SPV_EXT_shader_atomic_float_min_max))
      report_fatal_error(ATOM_FLT_REQ_EXT_MSG("_min_max"), false);
    Reqs.addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_min_max);
    switch (BitWidth) {
    case 16:
981 Reqs.addCapability(SPIRV::Capability::AtomicFloat16MinMaxEXT);
982 break;
983 case 32:
984 Reqs.addCapability(SPIRV::Capability::AtomicFloat32MinMaxEXT);
985 break;
986 case 64:
987 Reqs.addCapability(SPIRV::Capability::AtomicFloat64MinMaxEXT);
988 break;
    default:
      report_fatal_error(
          "Unexpected floating-point type width in atomic float instruction");
    }
993 }
994}
995
996bool isUniformTexelBuffer(MachineInstr *ImageInst) {
997 if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
998 return false;
  uint32_t Dim = ImageInst->getOperand(2).getImm();
  uint32_t Sampled = ImageInst->getOperand(6).getImm();
  return Dim == SPIRV::Dim::DIM_Buffer && Sampled == 1;
1002}
1003
1004bool isStorageTexelBuffer(MachineInstr *ImageInst) {
1005 if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
1006 return false;
  uint32_t Dim = ImageInst->getOperand(2).getImm();
  uint32_t Sampled = ImageInst->getOperand(6).getImm();
  return Dim == SPIRV::Dim::DIM_Buffer && Sampled == 2;
1010}
1011
1012bool isSampledImage(MachineInstr *ImageInst) {
1013 if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
1014 return false;
  uint32_t Dim = ImageInst->getOperand(2).getImm();
  uint32_t Sampled = ImageInst->getOperand(6).getImm();
  return Dim != SPIRV::Dim::DIM_Buffer && Sampled == 1;
1018}
1019
1020bool isInputAttachment(MachineInstr *ImageInst) {
1021 if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
1022 return false;
  uint32_t Dim = ImageInst->getOperand(2).getImm();
  uint32_t Sampled = ImageInst->getOperand(6).getImm();
  return Dim == SPIRV::Dim::DIM_SubpassData && Sampled == 2;
1026}
1027
1028bool isStorageImage(MachineInstr *ImageInst) {
1029 if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
1030 return false;
  uint32_t Dim = ImageInst->getOperand(2).getImm();
  uint32_t Sampled = ImageInst->getOperand(6).getImm();
  return Dim != SPIRV::Dim::DIM_Buffer && Sampled == 2;
1034}
1035
1036bool isCombinedImageSampler(MachineInstr *SampledImageInst) {
1037 if (SampledImageInst->getOpcode() != SPIRV::OpTypeSampledImage)
1038 return false;

  const MachineRegisterInfo &MRI = SampledImageInst->getMF()->getRegInfo();
  Register ImageReg = SampledImageInst->getOperand(1).getReg();
  auto *ImageInst = MRI.getUniqueVRegDef(ImageReg);
1043 return isSampledImage(ImageInst);
1044}
1045
bool hasNonUniformDecoration(Register Reg, const MachineRegisterInfo &MRI) {
  for (const auto &MI : MRI.reg_instructions(Reg)) {
1048 if (MI.getOpcode() != SPIRV::OpDecorate)
1049 continue;
1050
1051 uint32_t Dec = MI.getOperand(1).getImm();
1052 if (Dec == SPIRV::Decoration::NonUniformEXT)
1053 return true;
1054 }
1055 return false;
1056}
1057
void addOpAccessChainReqs(const MachineInstr &Instr,
                          SPIRV::RequirementHandler &Handler,
                          const SPIRVSubtarget &Subtarget) {
  const MachineRegisterInfo &MRI = Instr.getMF()->getRegInfo();
  // Get the result type. If it is an image type, then the shader uses
  // descriptor indexing, and the appropriate capabilities are added based on
  // the specifics of the image.
  Register ResTypeReg = Instr.getOperand(1).getReg();
  MachineInstr *ResTypeInst = MRI.getUniqueVRegDef(ResTypeReg);

  assert(ResTypeInst->getOpcode() == SPIRV::OpTypePointer);
  auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
      ResTypeInst->getOperand(1).getImm());
  if (StorageClass != SPIRV::StorageClass::StorageClass::UniformConstant &&
      StorageClass != SPIRV::StorageClass::StorageClass::Uniform &&
      StorageClass != SPIRV::StorageClass::StorageClass::StorageBuffer) {
    return;
  }
1075
  Register PointeeTypeReg = ResTypeInst->getOperand(2).getReg();
  MachineInstr *PointeeType = MRI.getUniqueVRegDef(PointeeTypeReg);
1078 if (PointeeType->getOpcode() != SPIRV::OpTypeImage &&
1079 PointeeType->getOpcode() != SPIRV::OpTypeSampledImage &&
1080 PointeeType->getOpcode() != SPIRV::OpTypeSampler) {
1081 return;
1082 }
1083
1084 bool IsNonUniform =
1085 hasNonUniformDecoration(Instr.getOperand(0).getReg(), MRI);
  if (isUniformTexelBuffer(PointeeType)) {
    if (IsNonUniform)
      Handler.addCapability(
          SPIRV::Capability::UniformTexelBufferArrayNonUniformIndexingEXT);
    else
      Handler.addCapability(
          SPIRV::Capability::UniformTexelBufferArrayDynamicIndexingEXT);
  } else if (isInputAttachment(PointeeType)) {
    if (IsNonUniform)
      Handler.addCapability(
          SPIRV::Capability::InputAttachmentArrayNonUniformIndexingEXT);
    else
      Handler.addCapability(
          SPIRV::Capability::InputAttachmentArrayDynamicIndexingEXT);
  } else if (isStorageTexelBuffer(PointeeType)) {
    if (IsNonUniform)
      Handler.addCapability(
          SPIRV::Capability::StorageTexelBufferArrayNonUniformIndexingEXT);
    else
      Handler.addCapability(
          SPIRV::Capability::StorageTexelBufferArrayDynamicIndexingEXT);
  } else if (isSampledImage(PointeeType) ||
             isCombinedImageSampler(PointeeType) ||
             PointeeType->getOpcode() == SPIRV::OpTypeSampler) {
    if (IsNonUniform)
      Handler.addCapability(
          SPIRV::Capability::SampledImageArrayNonUniformIndexingEXT);
    else
      Handler.addCapability(
          SPIRV::Capability::SampledImageArrayDynamicIndexing);
  } else if (isStorageImage(PointeeType)) {
    if (IsNonUniform)
      Handler.addCapability(
          SPIRV::Capability::StorageImageArrayNonUniformIndexingEXT);
    else
      Handler.addCapability(
          SPIRV::Capability::StorageImageArrayDynamicIndexing);
  }
}
1125
1126static bool isImageTypeWithUnknownFormat(SPIRVType *TypeInst) {
1127 if (TypeInst->getOpcode() != SPIRV::OpTypeImage)
1128 return false;
  assert(TypeInst->getOperand(7).isImm() && "The image format must be an imm.");
  return TypeInst->getOperand(7).getImm() == 0;
}
1132
static void AddDotProductRequirements(const MachineInstr &MI,
                                      SPIRV::RequirementHandler &Reqs,
                                      const SPIRVSubtarget &ST) {
  if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product))
    Reqs.addExtension(SPIRV::Extension::SPV_KHR_integer_dot_product);
  Reqs.addCapability(SPIRV::Capability::DotProduct);

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  assert(MI.getOperand(2).isReg() && "Unexpected operand in dot");
  // We do not consider what the previous instruction is. This is just used
  // to get the input register and to check the type.
  const MachineInstr *Input = MRI.getVRegDef(MI.getOperand(2).getReg());
  assert(Input->getOperand(1).isReg() && "Unexpected operand in dot input");
  Register InputReg = Input->getOperand(1).getReg();

  SPIRVType *TypeDef = MRI.getVRegDef(InputReg);
  if (TypeDef->getOpcode() == SPIRV::OpTypeInt) {
    assert(TypeDef->getOperand(1).getImm() == 32);
    Reqs.addCapability(SPIRV::Capability::DotProductInput4x8BitPacked);
  } else if (TypeDef->getOpcode() == SPIRV::OpTypeVector) {
    const MachineInstr *ScalarTypeDef =
        MRI.getVRegDef(TypeDef->getOperand(1).getReg());
    assert(ScalarTypeDef->getOpcode() == SPIRV::OpTypeInt);
    if (ScalarTypeDef->getOperand(1).getImm() == 8) {
      assert(TypeDef->getOperand(2).getImm() == 4 &&
             "Dot operand of 8-bit integer type requires 4 components");
      Reqs.addCapability(SPIRV::Capability::DotProductInput4x8Bit);
    } else {
1160 Reqs.addCapability(SPIRV::Capability::DotProductInputAll);
1161 }
1162 }
1163}
1164
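// The central per-instruction dispatch: map each SPIR-V opcode (and, where
// needed, its operands) to the capabilities and extensions it requires.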
void addInstrRequirements(const MachineInstr &MI,
                          SPIRV::RequirementHandler &Reqs,
                          const SPIRVSubtarget &ST) {
  switch (MI.getOpcode()) {
  case SPIRV::OpMemoryModel: {
    int64_t Addr = MI.getOperand(0).getImm();
    Reqs.getAndAddRequirements(SPIRV::OperandCategory::AddressingModelOperand,
                               Addr, ST);
    int64_t Mem = MI.getOperand(1).getImm();
    Reqs.getAndAddRequirements(SPIRV::OperandCategory::MemoryModelOperand, Mem,
                               ST);
    break;
  }
  case SPIRV::OpEntryPoint: {
    int64_t Exe = MI.getOperand(0).getImm();
    Reqs.getAndAddRequirements(SPIRV::OperandCategory::ExecutionModelOperand,
                               Exe, ST);
    break;
  }
  case SPIRV::OpExecutionMode:
  case SPIRV::OpExecutionModeId: {
    int64_t Exe = MI.getOperand(1).getImm();
    Reqs.getAndAddRequirements(SPIRV::OperandCategory::ExecutionModeOperand,
                               Exe, ST);
    break;
  }
  case SPIRV::OpTypeMatrix:
    Reqs.addCapability(SPIRV::Capability::Matrix);
    break;
  case SPIRV::OpTypeInt: {
    unsigned BitWidth = MI.getOperand(1).getImm();
    if (BitWidth == 64)
      Reqs.addCapability(SPIRV::Capability::Int64);
    else if (BitWidth == 16)
      Reqs.addCapability(SPIRV::Capability::Int16);
    else if (BitWidth == 8)
      Reqs.addCapability(SPIRV::Capability::Int8);
    break;
  }
  case SPIRV::OpTypeFloat: {
    unsigned BitWidth = MI.getOperand(1).getImm();
    if (BitWidth == 64)
      Reqs.addCapability(SPIRV::Capability::Float64);
    else if (BitWidth == 16)
      Reqs.addCapability(SPIRV::Capability::Float16);
    break;
  }
1212 case SPIRV::OpTypeVector: {
1213 unsigned NumComponents = MI.getOperand(2).getImm();
1214 if (NumComponents == 8 || NumComponents == 16)
1215 Reqs.addCapability(SPIRV::Capability::Vector16);
1216 break;
1217 }
  case SPIRV::OpTypePointer: {
    auto SC = MI.getOperand(1).getImm();
    Reqs.getAndAddRequirements(SPIRV::OperandCategory::StorageClassOperand, SC,
                               ST);
    // If it's a type of pointer to float16 targeting OpenCL, add the
    // Float16Buffer capability.
    if (!ST.isOpenCLEnv())
      break;
    assert(MI.getOperand(2).isReg());
    const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
    SPIRVType *TypeDef = MRI.getVRegDef(MI.getOperand(2).getReg());
    if (TypeDef->getOpcode() == SPIRV::OpTypeFloat &&
        TypeDef->getOperand(1).getImm() == 16)
      Reqs.addCapability(SPIRV::Capability::Float16Buffer);
    break;
  }
1234 case SPIRV::OpExtInst: {
1235 if (MI.getOperand(2).getImm() ==
1236 static_cast<int64_t>(
1237 SPIRV::InstructionSet::NonSemantic_Shader_DebugInfo_100)) {
1238 Reqs.addExtension(SPIRV::Extension::SPV_KHR_non_semantic_info);
1239 }
1240 break;
1241 }
1242 case SPIRV::OpBitReverse:
1243 case SPIRV::OpBitFieldInsert:
1244 case SPIRV::OpBitFieldSExtract:
1245 case SPIRV::OpBitFieldUExtract:
    if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
      Reqs.addCapability(SPIRV::Capability::Shader);
      break;
    }
    Reqs.addExtension(SPIRV::Extension::SPV_KHR_bit_instructions);
    Reqs.addCapability(SPIRV::Capability::BitInstructions);
    break;
  case SPIRV::OpTypeRuntimeArray:
    Reqs.addCapability(SPIRV::Capability::Shader);
    break;
  case SPIRV::OpTypeOpaque:
  case SPIRV::OpTypeEvent:
    Reqs.addCapability(SPIRV::Capability::Kernel);
    break;
  case SPIRV::OpTypePipe:
  case SPIRV::OpTypeReserveId:
    Reqs.addCapability(SPIRV::Capability::Pipes);
    break;
1264 case SPIRV::OpTypeDeviceEvent:
1265 case SPIRV::OpTypeQueue:
1266 case SPIRV::OpBuildNDRange:
1267 Reqs.addCapability(SPIRV::Capability::DeviceEnqueue);
1268 break;
1269 case SPIRV::OpDecorate:
1270 case SPIRV::OpDecorateId:
1271 case SPIRV::OpDecorateString:
1272 addOpDecorateReqs(MI, 1, Reqs, ST);
1273 break;
1274 case SPIRV::OpMemberDecorate:
1275 case SPIRV::OpMemberDecorateString:
1276 addOpDecorateReqs(MI, 2, Reqs, ST);
1277 break;
1278 case SPIRV::OpInBoundsPtrAccessChain:
1279 Reqs.addCapability(SPIRV::Capability::Addresses);
1280 break;
1281 case SPIRV::OpConstantSampler:
1282 Reqs.addCapability(SPIRV::Capability::LiteralSampler);
1283 break;
1284 case SPIRV::OpInBoundsAccessChain:
1285 case SPIRV::OpAccessChain:
1286 addOpAccessChainReqs(MI, Reqs, ST);
1287 break;
1288 case SPIRV::OpTypeImage:
1289 addOpTypeImageReqs(MI, Reqs, ST);
1290 break;
1291 case SPIRV::OpTypeSampler:
    if (!ST.isVulkanEnv()) {
1293 Reqs.addCapability(SPIRV::Capability::ImageBasic);
1294 }
1295 break;
1296 case SPIRV::OpTypeForwardPointer:
1297
1298 Reqs.addCapability(SPIRV::Capability::Addresses);
1299 break;
1300 case SPIRV::OpAtomicFlagTestAndSet:
1301 case SPIRV::OpAtomicLoad:
1302 case SPIRV::OpAtomicStore:
1303 case SPIRV::OpAtomicExchange:
1304 case SPIRV::OpAtomicCompareExchange:
1305 case SPIRV::OpAtomicIIncrement:
1306 case SPIRV::OpAtomicIDecrement:
1307 case SPIRV::OpAtomicIAdd:
1308 case SPIRV::OpAtomicISub:
1309 case SPIRV::OpAtomicUMin:
1310 case SPIRV::OpAtomicUMax:
1311 case SPIRV::OpAtomicSMin:
1312 case SPIRV::OpAtomicSMax:
1313 case SPIRV::OpAtomicAnd:
1314 case SPIRV::OpAtomicOr:
  case SPIRV::OpAtomicXor: {
    const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
    const MachineInstr *InstrPtr = &MI;
    if (MI.getOpcode() == SPIRV::OpAtomicStore) {
      assert(MI.getOperand(3).isReg());
      InstrPtr = MRI.getVRegDef(MI.getOperand(3).getReg());
      assert(InstrPtr && "Unexpected type instruction for OpAtomicStore");
    }
    assert(InstrPtr->getOperand(1).isReg() && "Unexpected operand in atomic");
    Register TypeReg = InstrPtr->getOperand(1).getReg();
    SPIRVType *TypeDef = MRI.getVRegDef(TypeReg);
    if (TypeDef->getOpcode() == SPIRV::OpTypeInt) {
      unsigned BitWidth = TypeDef->getOperand(1).getImm();
      if (BitWidth == 64)
        Reqs.addCapability(SPIRV::Capability::Int64Atomics);
    }
1331 break;
1332 }
1333 case SPIRV::OpGroupNonUniformIAdd:
1334 case SPIRV::OpGroupNonUniformFAdd:
1335 case SPIRV::OpGroupNonUniformIMul:
1336 case SPIRV::OpGroupNonUniformFMul:
1337 case SPIRV::OpGroupNonUniformSMin:
1338 case SPIRV::OpGroupNonUniformUMin:
1339 case SPIRV::OpGroupNonUniformFMin:
1340 case SPIRV::OpGroupNonUniformSMax:
1341 case SPIRV::OpGroupNonUniformUMax:
1342 case SPIRV::OpGroupNonUniformFMax:
1343 case SPIRV::OpGroupNonUniformBitwiseAnd:
1344 case SPIRV::OpGroupNonUniformBitwiseOr:
1345 case SPIRV::OpGroupNonUniformBitwiseXor:
1346 case SPIRV::OpGroupNonUniformLogicalAnd:
1347 case SPIRV::OpGroupNonUniformLogicalOr:
1348 case SPIRV::OpGroupNonUniformLogicalXor: {
1349 assert(MI.getOperand(3).isImm());
1350 int64_t GroupOp = MI.getOperand(3).getImm();
1351 switch (GroupOp) {
1352 case SPIRV::GroupOperation::Reduce:
1353 case SPIRV::GroupOperation::InclusiveScan:
1354 case SPIRV::GroupOperation::ExclusiveScan:
1355 Reqs.addCapability(SPIRV::Capability::GroupNonUniformArithmetic);
1356 break;
1357 case SPIRV::GroupOperation::ClusteredReduce:
1358 Reqs.addCapability(SPIRV::Capability::GroupNonUniformClustered);
1359 break;
1360 case SPIRV::GroupOperation::PartitionedReduceNV:
1361 case SPIRV::GroupOperation::PartitionedInclusiveScanNV:
1362 case SPIRV::GroupOperation::PartitionedExclusiveScanNV:
1363 Reqs.addCapability(SPIRV::Capability::GroupNonUniformPartitionedNV);
1364 break;
1365 }
1366 break;
1367 }
1368 case SPIRV::OpGroupNonUniformShuffle:
1369 case SPIRV::OpGroupNonUniformShuffleXor:
1370 Reqs.addCapability(SPIRV::Capability::GroupNonUniformShuffle);
1371 break;
1372 case SPIRV::OpGroupNonUniformShuffleUp:
1373 case SPIRV::OpGroupNonUniformShuffleDown:
1374 Reqs.addCapability(SPIRV::Capability::GroupNonUniformShuffleRelative);
1375 break;
1376 case SPIRV::OpGroupAll:
1377 case SPIRV::OpGroupAny:
1378 case SPIRV::OpGroupBroadcast:
1379 case SPIRV::OpGroupIAdd:
1380 case SPIRV::OpGroupFAdd:
1381 case SPIRV::OpGroupFMin:
1382 case SPIRV::OpGroupUMin:
1383 case SPIRV::OpGroupSMin:
1384 case SPIRV::OpGroupFMax:
1385 case SPIRV::OpGroupUMax:
  case SPIRV::OpGroupSMax:
    Reqs.addCapability(SPIRV::Capability::Groups);
    break;
1389 case SPIRV::OpGroupNonUniformElect:
1390 Reqs.addCapability(SPIRV::Capability::GroupNonUniform);
1391 break;
1392 case SPIRV::OpGroupNonUniformAll:
1393 case SPIRV::OpGroupNonUniformAny:
1394 case SPIRV::OpGroupNonUniformAllEqual:
1395 Reqs.addCapability(SPIRV::Capability::GroupNonUniformVote);
1396 break;
1397 case SPIRV::OpGroupNonUniformBroadcast:
1398 case SPIRV::OpGroupNonUniformBroadcastFirst:
1399 case SPIRV::OpGroupNonUniformBallot:
1400 case SPIRV::OpGroupNonUniformInverseBallot:
1401 case SPIRV::OpGroupNonUniformBallotBitExtract:
1402 case SPIRV::OpGroupNonUniformBallotBitCount:
1403 case SPIRV::OpGroupNonUniformBallotFindLSB:
1404 case SPIRV::OpGroupNonUniformBallotFindMSB:
1405 Reqs.addCapability(SPIRV::Capability::GroupNonUniformBallot);
1406 break;
1407 case SPIRV::OpSubgroupShuffleINTEL:
1408 case SPIRV::OpSubgroupShuffleDownINTEL:
1409 case SPIRV::OpSubgroupShuffleUpINTEL:
1410 case SPIRV::OpSubgroupShuffleXorINTEL:
1411 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1412 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
1413 Reqs.addCapability(SPIRV::Capability::SubgroupShuffleINTEL);
1414 }
1415 break;
1416 case SPIRV::OpSubgroupBlockReadINTEL:
1417 case SPIRV::OpSubgroupBlockWriteINTEL:
1418 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1419 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
1420 Reqs.addCapability(SPIRV::Capability::SubgroupBufferBlockIOINTEL);
1421 }
1422 break;
1423 case SPIRV::OpSubgroupImageBlockReadINTEL:
1424 case SPIRV::OpSubgroupImageBlockWriteINTEL:
1425 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1426 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
1427 Reqs.addCapability(SPIRV::Capability::SubgroupImageBlockIOINTEL);
1428 }
1429 break;
1430 case SPIRV::OpSubgroupImageMediaBlockReadINTEL:
1431 case SPIRV::OpSubgroupImageMediaBlockWriteINTEL:
1432 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
1433 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_media_block_io);
1434 Reqs.addCapability(SPIRV::Capability::SubgroupImageMediaBlockIOINTEL);
1435 }
1436 break;
1437 case SPIRV::OpAssumeTrueKHR:
1438 case SPIRV::OpExpectKHR:
1439 if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume)) {
1440 Reqs.addExtension(SPIRV::Extension::SPV_KHR_expect_assume);
1441 Reqs.addCapability(SPIRV::Capability::ExpectAssumeKHR);
1442 }
1443 break;
1444 case SPIRV::OpPtrCastToCrossWorkgroupINTEL:
1445 case SPIRV::OpCrossWorkgroupCastToPtrINTEL:
1446 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)) {
1447 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes);
1448 Reqs.addCapability(SPIRV::Capability::USMStorageClassesINTEL);
1449 }
1450 break;
1451 case SPIRV::OpConstantFunctionPointerINTEL:
1452 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)) {
1453 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
1454 Reqs.addCapability(SPIRV::Capability::FunctionPointersINTEL);
1455 }
1456 break;
1457 case SPIRV::OpGroupNonUniformRotateKHR:
    if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_subgroup_rotate))
1459 report_fatal_error("OpGroupNonUniformRotateKHR instruction requires the "
1460 "following SPIR-V extension: SPV_KHR_subgroup_rotate",
1461 false);
1462 Reqs.addExtension(SPIRV::Extension::SPV_KHR_subgroup_rotate);
1463 Reqs.addCapability(SPIRV::Capability::GroupNonUniformRotateKHR);
1464 Reqs.addCapability(SPIRV::Capability::GroupNonUniform);
1465 break;
1466 case SPIRV::OpGroupIMulKHR:
1467 case SPIRV::OpGroupFMulKHR:
1468 case SPIRV::OpGroupBitwiseAndKHR:
1469 case SPIRV::OpGroupBitwiseOrKHR:
1470 case SPIRV::OpGroupBitwiseXorKHR:
1471 case SPIRV::OpGroupLogicalAndKHR:
1472 case SPIRV::OpGroupLogicalOrKHR:
1473 case SPIRV::OpGroupLogicalXorKHR:
1474 if (ST.canUseExtension(
1475 SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
1476 Reqs.addExtension(SPIRV::Extension::SPV_KHR_uniform_group_instructions);
1477 Reqs.addCapability(SPIRV::Capability::GroupUniformArithmeticKHR);
1478 }
1479 break;
1480 case SPIRV::OpReadClockKHR:
    if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock))
      report_fatal_error("OpReadClockKHR instruction requires the "
                         "following SPIR-V extension: SPV_KHR_shader_clock",
1484 false);
1485 Reqs.addExtension(SPIRV::Extension::SPV_KHR_shader_clock);
1486 Reqs.addCapability(SPIRV::Capability::ShaderClockKHR);
1487 break;
1488 case SPIRV::OpFunctionPointerCallINTEL:
1489 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)) {
1490 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
1491 Reqs.addCapability(SPIRV::Capability::FunctionPointersINTEL);
1492 }
1493 break;
1494 case SPIRV::OpAtomicFAddEXT:
1495 case SPIRV::OpAtomicFMinEXT:
1496 case SPIRV::OpAtomicFMaxEXT:
1497 AddAtomicFloatRequirements(MI, Reqs, ST);
1498 break;
1499 case SPIRV::OpConvertBF16ToFINTEL:
1500 case SPIRV::OpConvertFToBF16INTEL:
1501 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_bfloat16_conversion)) {
1502 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_bfloat16_conversion);
1503 Reqs.addCapability(SPIRV::Capability::BFloat16ConversionINTEL);
1504 }
1505 break;
1506 case SPIRV::OpVariableLengthArrayINTEL:
1507 case SPIRV::OpSaveMemoryINTEL:
1508 case SPIRV::OpRestoreMemoryINTEL:
1509 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array)) {
1510 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_variable_length_array);
1511 Reqs.addCapability(SPIRV::Capability::VariableLengthArrayINTEL);
1512 }
1513 break;
1514 case SPIRV::OpAsmTargetINTEL:
1515 case SPIRV::OpAsmINTEL:
1516 case SPIRV::OpAsmCallINTEL:
1517 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_inline_assembly)) {
1518 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_inline_assembly);
1519 Reqs.addCapability(SPIRV::Capability::AsmINTEL);
1520 }
1521 break;
1522 case SPIRV::OpTypeCooperativeMatrixKHR:
    if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
      report_fatal_error(
          "OpTypeCooperativeMatrixKHR type requires the "
1526 "following SPIR-V extension: SPV_KHR_cooperative_matrix",
1527 false);
1528 Reqs.addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
1529 Reqs.addCapability(SPIRV::Capability::CooperativeMatrixKHR);
1530 break;
1531 case SPIRV::OpArithmeticFenceEXT:
    if (!ST.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence))
      report_fatal_error("OpArithmeticFenceEXT instruction requires the "
                         "following SPIR-V extension: SPV_EXT_arithmetic_fence",
1535 false);
1536 Reqs.addExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence);
1537 Reqs.addCapability(SPIRV::Capability::ArithmeticFenceEXT);
1538 break;
1539 case SPIRV::OpControlBarrierArriveINTEL:
1540 case SPIRV::OpControlBarrierWaitINTEL:
1541 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
1542 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_split_barrier);
1543 Reqs.addCapability(SPIRV::Capability::SplitBarrierINTEL);
1544 }
1545 break;
1546 case SPIRV::OpCooperativeMatrixMulAddKHR: {
    if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
      report_fatal_error("Cooperative matrix instructions require the "
                         "following SPIR-V extension: "
1550 "SPV_KHR_cooperative_matrix",
1551 false);
1552 Reqs.addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
1553 Reqs.addCapability(SPIRV::Capability::CooperativeMatrixKHR);
1554 constexpr unsigned MulAddMaxSize = 6;
1555 if (MI.getNumOperands() != MulAddMaxSize)
1556 break;
1557 const int64_t CoopOperands = MI.getOperand(MulAddMaxSize - 1).getImm();
1558 if (CoopOperands &
1559 SPIRV::CooperativeMatrixOperands::MatrixAAndBTF32ComponentsINTEL) {
      if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1561 report_fatal_error("MatrixAAndBTF32ComponentsINTEL type interpretation "
1562 "require the following SPIR-V extension: "
1563 "SPV_INTEL_joint_matrix",
1564 false);
1565 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
      Reqs.addCapability(
          SPIRV::Capability::CooperativeMatrixTF32ComponentTypeINTEL);
1568 }
1569 if (CoopOperands & SPIRV::CooperativeMatrixOperands::
1570 MatrixAAndBBFloat16ComponentsINTEL ||
1571 CoopOperands &
1572 SPIRV::CooperativeMatrixOperands::MatrixCBFloat16ComponentsINTEL ||
1573 CoopOperands & SPIRV::CooperativeMatrixOperands::
1574 MatrixResultBFloat16ComponentsINTEL) {
      if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
        report_fatal_error("BFloat16 matrix component type interpretations "
                           "require the following SPIR-V extension: "
1578 "SPV_INTEL_joint_matrix",
1579 false);
1580 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
      Reqs.addCapability(
          SPIRV::Capability::CooperativeMatrixBFloat16ComponentTypeINTEL);
1583 }
1584 break;
1585 }
1586 case SPIRV::OpCooperativeMatrixLoadKHR:
1587 case SPIRV::OpCooperativeMatrixStoreKHR:
1588 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
1589 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
1590 case SPIRV::OpCooperativeMatrixPrefetchINTEL: {
    if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
      report_fatal_error("Cooperative matrix instructions require the "
                         "following SPIR-V extension: "
1594 "SPV_KHR_cooperative_matrix",
1595 false);
1596 Reqs.addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
1597 Reqs.addCapability(SPIRV::Capability::CooperativeMatrixKHR);
1598
1599
1600
1601 std::unordered_map<unsigned, unsigned> LayoutToInstMap = {
1602 {SPIRV::OpCooperativeMatrixLoadKHR, 3},
1603 {SPIRV::OpCooperativeMatrixStoreKHR, 2},
1604 {SPIRV::OpCooperativeMatrixLoadCheckedINTEL, 5},
1605 {SPIRV::OpCooperativeMatrixStoreCheckedINTEL, 4},
1606 {SPIRV::OpCooperativeMatrixPrefetchINTEL, 4}};
1607
1608 const auto OpCode = MI.getOpcode();
1609 const unsigned LayoutNum = LayoutToInstMap[OpCode];
1610 Register RegLayout = MI.getOperand(LayoutNum).getReg();
1612 MachineInstr *MILayout = MRI.getUniqueVRegDef(RegLayout);
1613 if (MILayout->getOpcode() == SPIRV::OpConstantI) {
1614 const unsigned LayoutVal = MILayout->getOperand(2).getImm();
1615 if (LayoutVal ==
1616 static_cast<unsigned>(SPIRV::CooperativeMatrixLayout::PackedINTEL)) {
        if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1618 report_fatal_error("PackedINTEL layout require the following SPIR-V "
1619 "extension: SPV_INTEL_joint_matrix",
1620 false);
1621 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1622 Reqs.addCapability(SPIRV::Capability::PackedCooperativeMatrixINTEL);
1623 }
1624 }
1625
1626
1627 if (OpCode == SPIRV::OpCooperativeMatrixLoadKHR ||
1628 OpCode == SPIRV::OpCooperativeMatrixStoreKHR)
1629 break;
1630
1631 std::string InstName;
1632 switch (OpCode) {
1633 case SPIRV::OpCooperativeMatrixPrefetchINTEL:
1634 InstName = "OpCooperativeMatrixPrefetchINTEL";
1635 break;
1636 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
1637 InstName = "OpCooperativeMatrixLoadCheckedINTEL";
1638 break;
1639 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
1640 InstName = "OpCooperativeMatrixStoreCheckedINTEL";
1641 break;
1642 }
1643
    if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix)) {
      const std::string ErrorMsg =
          InstName + " instruction requires the "
                     "following SPIR-V extension: SPV_INTEL_joint_matrix";
      report_fatal_error(ErrorMsg.c_str(), false);
    }
1650 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1651 if (OpCode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
1652 Reqs.addCapability(SPIRV::Capability::CooperativeMatrixPrefetchINTEL);
1653 break;
1654 }
    Reqs.addCapability(
        SPIRV::Capability::CooperativeMatrixCheckedInstructionsINTEL);
1657 break;
1658 }
1659 case SPIRV::OpCooperativeMatrixConstructCheckedINTEL:
    if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
      report_fatal_error("OpCooperativeMatrixConstructCheckedINTEL "
                         "instructions require the following SPIR-V extension: "
1663 "SPV_INTEL_joint_matrix",
1664 false);
1665 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
    Reqs.addCapability(
        SPIRV::Capability::CooperativeMatrixCheckedInstructionsINTEL);
1668 break;
1669 case SPIRV::OpCooperativeMatrixGetElementCoordINTEL:
    if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1671 report_fatal_error("OpCooperativeMatrixGetElementCoordINTEL requires the "
1672 "following SPIR-V extension: SPV_INTEL_joint_matrix",
1673 false);
1674 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
    Reqs.addCapability(
        SPIRV::Capability::CooperativeMatrixInvocationInstructionsINTEL);
1677 break;
  case SPIRV::OpKill: {
    Reqs.addCapability(SPIRV::Capability::Shader);
  } break;
  case SPIRV::OpDemoteToHelperInvocation:
    Reqs.addCapability(SPIRV::Capability::DemoteToHelperInvocation);

    if (ST.canUseExtension(
            SPIRV::Extension::SPV_EXT_demote_to_helper_invocation)) {
      Reqs.addExtension(
          SPIRV::Extension::SPV_EXT_demote_to_helper_invocation);
    }
1690 break;
1691 case SPIRV::OpSDot:
1692 case SPIRV::OpUDot:
1693 AddDotProductRequirements(MI, Reqs, ST);
1694 break;
1695 case SPIRV::OpImageRead: {
1696 Register ImageReg = MI.getOperand(2).getReg();
1697 SPIRVType *TypeDef = ST.getSPIRVGlobalRegistry()->getResultType(
1699 if (isImageTypeWithUnknownFormat(TypeDef))
1700 Reqs.addCapability(SPIRV::Capability::StorageImageReadWithoutFormat);
1701 break;
1702 }
1703 case SPIRV::OpImageWrite: {
1704 Register ImageReg = MI.getOperand(0).getReg();
1705 SPIRVType *TypeDef = ST.getSPIRVGlobalRegistry()->getResultType(
1707 if (isImageTypeWithUnknownFormat(TypeDef))
1708 Reqs.addCapability(SPIRV::Capability::StorageImageWriteWithoutFormat);
1709 break;
1710 }
1711
1712 default:
1713 break;
1714 }
1715
1716
1717
1718
1720 SPIRV::Capability::Shader);
1721}
1722
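// Collect module-wide requirements: per-instruction requirements, execution
// modes from "spirv.ExecutionMode" metadata, and per-function attributes such
// as reqd_work_group_size or hlsl.numthreads.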
static void collectReqs(const Module &M, SPIRV::ModuleAnalysisInfo &MAI,
                        MachineModuleInfo *MMI, const SPIRVSubtarget &ST) {
  // Collect requirements for existing instructions.
  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    MachineFunction *MF = MMI->getMachineFunction(*F);
    if (!MF)
      continue;
    for (const MachineBasicBlock &MBB : *MF)
      for (const MachineInstr &MI : MBB)
        addInstrRequirements(MI, MAI.Reqs, ST);
  }
  // Collect requirements for OpExecutionMode instructions.
  auto Node = M.getNamedMetadata("spirv.ExecutionMode");
  if (Node) {
    bool RequireFloatControls = false, RequireFloatControls2 = false,
         VerLower14 = !ST.isAtLeastSPIRVVer(VersionTuple(1, 4));
1739 bool HasFloatControls2 =
1740 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_float_controls2);
    for (unsigned i = 0; i < Node->getNumOperands(); i++) {
      MDNode *MDN = cast<MDNode>(Node->getOperand(i));
      const MDOperand &MDOp = MDN->getOperand(1);
      if (auto *CMeta = dyn_cast<ConstantAsMetadata>(MDOp)) {
        Constant *C = CMeta->getValue();
        if (ConstantInt *Const = dyn_cast<ConstantInt>(C)) {
1747 auto EM = Const->getZExtValue();
1748
1749
1750 switch (EM) {
1751 case SPIRV::ExecutionMode::DenormPreserve:
1752 case SPIRV::ExecutionMode::DenormFlushToZero:
1753 case SPIRV::ExecutionMode::SignedZeroInfNanPreserve:
1754 case SPIRV::ExecutionMode::RoundingModeRTE:
1755 case SPIRV::ExecutionMode::RoundingModeRTZ:
1756 RequireFloatControls = VerLower14;
            MAI.Reqs.getAndAddRequirements(
                SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
1759 break;
1760 case SPIRV::ExecutionMode::RoundingModeRTPINTEL:
1761 case SPIRV::ExecutionMode::RoundingModeRTNINTEL:
1762 case SPIRV::ExecutionMode::FloatingPointModeALTINTEL:
1763 case SPIRV::ExecutionMode::FloatingPointModeIEEEINTEL:
1764 if (HasFloatControls2) {
1765 RequireFloatControls2 = true;
              MAI.Reqs.getAndAddRequirements(
                  SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
1768 }
1769 break;
1770 default:
            MAI.Reqs.getAndAddRequirements(
                SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
1773 }
1774 }
1775 }
1776 }
1777 if (RequireFloatControls &&
1778 ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls))
1779 MAI.Reqs.addExtension(SPIRV::Extension::SPV_KHR_float_controls);
1780 if (RequireFloatControls2)
1781 MAI.Reqs.addExtension(SPIRV::Extension::SPV_INTEL_float_controls2);
1782 }
  for (auto FI = M.begin(), E = M.end(); FI != E; ++FI) {
    const Function &F = *FI;
    if (F.isDeclaration())
      continue;
    if (F.getMetadata("reqd_work_group_size"))
      MAI.Reqs.getAndAddRequirements(
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::LocalSize, ST);
    if (F.getFnAttribute("hlsl.numthreads").isValid()) {
      MAI.Reqs.getAndAddRequirements(
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::LocalSize, ST);
    }
    if (F.getMetadata("work_group_size_hint"))
      MAI.Reqs.getAndAddRequirements(
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::LocalSizeHint, ST);
    if (F.getMetadata("intel_reqd_sub_group_size"))
      MAI.Reqs.getAndAddRequirements(
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::SubgroupSize, ST);
    if (F.getMetadata("vec_type_hint"))
      MAI.Reqs.getAndAddRequirements(
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::VecTypeHint, ST);
1808
    if (F.hasOptNone()) {
      if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_optnone)) {
        MAI.Reqs.addExtension(SPIRV::Extension::SPV_INTEL_optnone);
        MAI.Reqs.addCapability(SPIRV::Capability::OptNoneINTEL);
      } else if (ST.canUseExtension(SPIRV::Extension::SPV_EXT_optnone)) {
        MAI.Reqs.addExtension(SPIRV::Extension::SPV_EXT_optnone);
        MAI.Reqs.addCapability(SPIRV::Capability::OptNoneEXT);
      }
    }
1818 }
1819}
1820
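// Translate MachineInstr fast-math flags into the SPIR-V FPFastMathMode mask.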
static unsigned getFastMathFlags(const MachineInstr &I) {
  unsigned Flags = SPIRV::FPFastMathMode::None;
  if (I.getFlag(MachineInstr::MIFlag::FmNoNans))
    Flags |= SPIRV::FPFastMathMode::NotNaN;
  if (I.getFlag(MachineInstr::MIFlag::FmNoInfs))
    Flags |= SPIRV::FPFastMathMode::NotInf;
  if (I.getFlag(MachineInstr::MIFlag::FmNsz))
    Flags |= SPIRV::FPFastMathMode::NSZ;
  if (I.getFlag(MachineInstr::MIFlag::FmArcp))
    Flags |= SPIRV::FPFastMathMode::AllowRecip;
  if (I.getFlag(MachineInstr::MIFlag::FmReassoc))
    Flags |= SPIRV::FPFastMathMode::Fast;
  return Flags;
}
1835
static void handleMIFlagDecoration(MachineInstr &I, const SPIRVSubtarget &ST,
                                   const SPIRVInstrInfo &TII,
                                   SPIRV::RequirementHandler &Reqs) {
  if (I.getFlag(MachineInstr::MIFlag::NoSWrap) && TII.canUseNSW(I) &&
      getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand,
                                     SPIRV::Decoration::NoSignedWrap, ST, Reqs)
          .IsSatisfiable) {
    buildOpDecorate(I.getOperand(0).getReg(), I, TII,
                    SPIRV::Decoration::NoSignedWrap, {});
  }
  if (I.getFlag(MachineInstr::MIFlag::NoUWrap) && TII.canUseNUW(I) &&
      getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand,
                                     SPIRV::Decoration::NoUnsignedWrap, ST,
                                     Reqs)
          .IsSatisfiable) {
    buildOpDecorate(I.getOperand(0).getReg(), I, TII,
                    SPIRV::Decoration::NoUnsignedWrap, {});
  }
  if (!TII.canUseFastMathFlags(I))
    return;
1856 unsigned FMFlags = getFastMathFlags(I);
1857 if (FMFlags == SPIRV::FPFastMathMode::None)
1858 return;
1859 Register DstReg = I.getOperand(0).getReg();
1860 buildOpDecorate(DstReg, I, TII, SPIRV::Decoration::FPFastMathMode, {FMFlags});
1861}
1862

// Walk all instructions and translate wrap/fast-math MI flags into the
// corresponding SPIR-V decorations.
static void addDecorations(const Module &M, const SPIRVInstrInfo &TII,
                           MachineModuleInfo *MMI, const SPIRVSubtarget &ST,
                           SPIRV::ModuleAnalysisInfo &MAI) {
  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    MachineFunction *MF = MMI->getMachineFunction(*F);
    if (!MF)
      continue;
    for (auto &MBB : *MF)
      for (auto &MI : MBB)
        handleMIFlagDecoration(MI, ST, TII, MAI.Reqs);
  }
}
1876
// Emit an OpName for every named, non-empty basic block.
static void addMBBNames(const Module &M, const SPIRVInstrInfo &TII,
                        MachineModuleInfo *MMI, const SPIRVSubtarget &ST,
                        SPIRV::ModuleAnalysisInfo &MAI) {
  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    MachineFunction *MF = MMI->getMachineFunction(*F);
    if (!MF)
      continue;
    MachineRegisterInfo &MRI = MF->getRegInfo();
    for (auto &MBB : *MF) {
      if (!MBB.hasName() || MBB.empty())
        continue;
      // Emit the basic block name as an OpName instruction.
      Register Reg = MAI.getOrCreateMBBRegister(MBB);
      MRI.setRegClass(Reg, &SPIRV::IDRegClass);
      MachineIRBuilder MIRBuilder(MBB, MBB.begin());
      buildOpName(Reg, MBB.getName(), MIRBuilder);
    }
  }
}
1897

// Rewrite generic PHI nodes into OpPhi, inserting the result-type operand
// right after the result operand.
static void patchPhis(const Module &M, SPIRVGlobalRegistry *GR,
                      const SPIRVInstrInfo &TII, MachineModuleInfo *MMI) {
  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    MachineFunction *MF = MMI->getMachineFunction(*F);
    if (!MF)
      continue;
    for (auto &MBB : *MF) {
      for (auto &MI : MBB) {
        if (MI.getOpcode() != TargetOpcode::PHI)
          continue;
        MI.setDesc(TII.get(SPIRV::OpPhi));
        SPIRVType *ResType =
            GR->getSPIRVTypeForVReg(MI.getOperand(0).getReg(), MF);
        Register ResTypeReg = GR->getSPIRVTypeID(ResType);
        MI.insert(MI.operands_begin() + 1,
                  {MachineOperand::CreateReg(ResTypeReg, false)});
      }
    }
  }
}
1918

void SPIRVModuleAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<MachineModuleInfoWrapperPass>();
}
1925
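// Entry point of the analysis: gather base info, patch PHIs, add decorations,
// collect requirements and declarations, number registers globally, and
// collect the remaining module-scope instructions.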
bool SPIRVModuleAnalysis::runOnModule(Module &M) {
  SPIRVTargetMachine &TM =
      getAnalysis<TargetPassConfig>().getTM<SPIRVTargetMachine>();
  ST = TM.getSubtargetImpl();
  GR = ST->getSPIRVGlobalRegistry();
  TII = ST->getInstrInfo();

  MMI = &getAnalysis<MachineModuleInfoWrapperPass>().getMMI();
1934
1935 setBaseInfo(M);
1936
1937 patchPhis(M, GR, *TII, MMI);
1938
1939 addMBBNames(M, *TII, MMI, *ST, MAI);
1940 addDecorations(M, *TII, MMI, *ST, MAI);
1941
1942 collectReqs(M, MAI, MMI, *ST);
1943
1944
1945
1946 collectDeclarations(M);
1947
1948
1949 numberRegistersGlobally(M);
1950
1951
1952 processOtherInstrs(M);
1953

  // If there are no entry points, we need the Linkage capability.
  if (MAI.MS[SPIRV::MB_EntryPoints].empty())
    MAI.Reqs.addCapability(SPIRV::Capability::Linkage);

  // Set the maximum ID used.
  GR->setBound(MAI.MaxID);

  return false;
1962}