LLVM: lib/CodeGen/TargetSchedule.cpp Source File
//===- TargetSchedule.cpp - Sched Machine Model ---------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a wrapper around MCSchedModel that allows the interface
// to benefit from information currently only available in TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <numeric>
#include <optional>

using namespace llvm;

static cl::opt<bool> ForceEnableIntervals(
    "sched-model-force-enable-intervals", cl::Hidden, cl::init(false),
    cl::desc("Force the use of resource intervals in the schedule model"));

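/// Return true if this machine model includes an instruction-level scheduling
/// model.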
bool TargetSchedModel::hasInstrSchedModel() const {
  return EnableSchedModel && SchedModel.hasInstrSchedModel();
}
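
/// Return true if this machine model includes cycle-to-cycle itinerary data.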
bool TargetSchedModel::hasInstrItineraries() const {
  return EnableSchedItins && !InstrItins.isEmpty();
}
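
/// Initialize the machine model for instruction scheduling.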
void TargetSchedModel::init(const TargetSubtargetInfo *TSInfo,
                            bool EnableSModel, bool EnableSItins) {
  STI = TSInfo;
  SchedModel = TSInfo->getSchedModel();
  TII = TSInfo->getInstrInfo();
  STI->initInstrItins(InstrItins);

  EnableSchedModel = EnableSModel;
  EnableSchedItins = EnableSItins;

  // Compute a common denominator (ResourceLCM) of the issue width and all
  // processor resource unit counts so that per-resource usage can be compared
  // on the same scale via the normalization factors below.
  unsigned NumRes = SchedModel.getNumProcResourceKinds();
  ResourceFactors.resize(NumRes);
  ResourceLCM = SchedModel.IssueWidth;
  for (unsigned Idx = 0; Idx < NumRes; ++Idx) {
    unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits;
    if (NumUnits > 0)
      ResourceLCM = std::lcm(ResourceLCM, NumUnits);
  }
  MicroOpFactor = ResourceLCM / SchedModel.IssueWidth;
  for (unsigned Idx = 0; Idx < NumRes; ++Idx) {
    unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits;
    ResourceFactors[Idx] = NumUnits ? (ResourceLCM / NumUnits) : 0;
  }
}

/// Return true if a new dispatch group must begin with this instruction.
bool TargetSchedModel::mustBeginGroup(const MachineInstr *MI,
                                      const MCSchedClassDesc *SC) const {
  if (hasInstrSchedModel()) {
    if (!SC)
      SC = resolveSchedClass(MI);
    if (SC->isValid())
      return SC->BeginGroup;
  }
  return false;
}

/// Return true if the current dispatch group must end after this instruction.
bool TargetSchedModel::mustEndGroup(const MachineInstr *MI,
                                    const MCSchedClassDesc *SC) const {
  if (hasInstrSchedModel()) {
    if (!SC)
      SC = resolveSchedClass(MI);
    if (SC->isValid())
      return SC->EndGroup;
  }
  return false;
}

/// Return the number of issue slots required for this MI.
unsigned TargetSchedModel::getNumMicroOps(const MachineInstr *MI,
                                          const MCSchedClassDesc *SC) const {
  if (hasInstrItineraries()) {
    int UOps = InstrItins.getNumMicroOps(MI->getDesc().getSchedClass());
    return (UOps >= 0) ? UOps : TII->getNumMicroOps(&InstrItins, *MI);
  }
  if (hasInstrSchedModel()) {
    if (!SC)
      SC = resolveSchedClass(MI);
    if (SC->isValid())
      return SC->NumMicroOps;
  }
  return MI->isTransient() ? 0 : 1;
}

// The machine model may explicitly specify an invalid latency, which
// effectively means infinite latency. Since users of the TargetSchedule API
// don't know how to handle this, we convert it to a very large latency that is
// easy to distinguish when debugging the DAG but won't induce overflow.
static unsigned capLatency(int Cycles) {
  return Cycles >= 0 ? Cycles : 1000;
}

/// Return the MCSchedClassDesc for this instruction. Some SchedClasses require
/// evaluation of predicates that depend on instruction operands or flags.
const MCSchedClassDesc *
TargetSchedModel::resolveSchedClass(const MachineInstr *MI) const {
  // Get the definition's scheduling class descriptor from this machine model.
  unsigned SchedClass = MI->getDesc().getSchedClass();
  const MCSchedClassDesc *SCDesc = SchedModel.getSchedClassDesc(SchedClass);
  if (!SCDesc->isVariant())
    return SCDesc;

#ifndef NDEBUG
  unsigned NIter = 0;
#endif
  while (SCDesc->isVariant()) {
    assert(++NIter < 6 && "Variants are nested deeper than the magic number");

    SchedClass = STI->resolveSchedClass(SchedClass, MI, this);
    SCDesc = SchedModel.getSchedClassDesc(SchedClass);
  }
  return SCDesc;
}

/// Find the def index of this operand. This index maps to the machine model
/// and is independent of the target: it counts only the register defs among
/// the operands preceding DefOperIdx.
static unsigned findDefIdx(const MachineInstr *MI, unsigned DefOperIdx) {
  unsigned DefIdx = 0;
  for (unsigned i = 0; i != DefOperIdx; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isDef())
      ++DefIdx;
  }
  return DefIdx;
}

/// Find the use index of this operand. This is independent of the
/// instruction's def operands.
///
/// Note that uses are not determined by the operand's isUse property, which
/// is simply the inverse of isDef. Here we consider any readsReg operand to be
/// a "use".
static unsigned findUseIdx(const MachineInstr *MI, unsigned UseOperIdx) {
  unsigned UseIdx = 0;
  for (unsigned i = 0; i != UseOperIdx; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.readsReg() && !MO.isDef())
      ++UseIdx;
  }
  return UseIdx;
}

/// Compute operand latency based on the available machine model.
unsigned TargetSchedModel::computeOperandLatency(
    const MachineInstr *DefMI, unsigned DefOperIdx,
    const MachineInstr *UseMI, unsigned UseOperIdx) const {

  const unsigned InstrLatency = computeInstrLatency(DefMI);
  const unsigned DefaultDefLatency = TII->defaultDefLatency(SchedModel, *DefMI);

  if (!hasInstrSchedModel() && !hasInstrItineraries())
    return DefaultDefLatency;

  if (hasInstrItineraries()) {
    std::optional<unsigned> OperLatency;
    if (UseMI) {
      OperLatency = TII->getOperandLatency(&InstrItins, *DefMI, DefOperIdx,
                                           *UseMI, UseOperIdx);
    }
    else {
      unsigned DefClass = DefMI->getDesc().getSchedClass();
      OperLatency = InstrItins.getOperandCycle(DefClass, DefOperIdx);
    }

    // Expected latency is the max of InstrLatency and DefaultDefLatency if we
    // didn't find an operand latency.
    return OperLatency ? *OperLatency
                       : std::max(InstrLatency, DefaultDefLatency);
  }

  // hasInstrSchedModel()
  const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
  unsigned DefIdx = findDefIdx(DefMI, DefOperIdx);
  if (DefIdx < SCDesc->NumWriteLatencyEntries) {
    // Lookup the definition's write latency in SubtargetInfo.
    const MCWriteLatencyEntry *WLEntry =
        STI->getWriteLatencyEntry(SCDesc, DefIdx);
    unsigned WriteID = WLEntry->WriteResourceID;
    unsigned Latency = capLatency(WLEntry->Cycles);
    if (!UseMI)
      return Latency;

    // Lookup the use's latency adjustment in SubtargetInfo.
    const MCSchedClassDesc *UseDesc = resolveSchedClass(UseMI);
    if (UseDesc->NumReadAdvanceEntries == 0)
      return Latency;
    unsigned UseIdx = findUseIdx(UseMI, UseOperIdx);
    int Advance = STI->getReadAdvanceCycles(UseDesc, UseIdx, WriteID);
    if (Advance > 0 && (unsigned)Advance > Latency)
      return 0;
    return Latency - Advance;
  }

  // Diagnose a write operand that is missing from a supposedly complete model.
#ifndef NDEBUG
  if (SCDesc->isValid() && !DefMI->getOperand(DefOperIdx).isImplicit() &&
      !DefMI->getDesc().operands()[DefOperIdx].isOptionalDef() &&
      SchedModel.isComplete()) {
    errs() << "DefIdx " << DefIdx << " exceeds machine model writes for "
           << *DefMI << " (Try with MCSchedModel.CompleteModel set to false)";
    llvm_unreachable("incomplete machine model");
  }
#endif

  // If DefIdx does not exist in the machine model (e.g. implicit defs), fall
  // back to the default def latency; transient instructions have no latency.
  return DefMI->isTransient() ? 0 : DefaultDefLatency;
}
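
// Compute and cap the latency for an already-resolved scheduling class.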
unsigned
TargetSchedModel::computeInstrLatency(const MCSchedClassDesc &SCDesc) const {
  return capLatency(MCSchedModel::computeInstrLatency(*STI, SCDesc));
}

unsigned TargetSchedModel::computeInstrLatency(unsigned Opcode) const {
  assert(hasInstrSchedModel() && "Only call this function with a SchedModel");
  unsigned SCIdx = TII->get(Opcode).getSchedClass();
  return capLatency(SchedModel.computeInstrLatency(*STI, SCIdx));
}

unsigned TargetSchedModel::computeInstrLatency(const MCInst &Inst) const {
  if (hasInstrSchedModel())
    return capLatency(SchedModel.computeInstrLatency(*STI, *TII, Inst));
  return computeInstrLatency(Inst.getOpcode());
}

unsigned
TargetSchedModel::computeInstrLatency(const MachineInstr *MI,
                                      bool UseDefaultDefLatency) const {
  // For the itinerary model, fall back to the old subtarget hook.
  // Allow subtargets to compute Bundle latencies outside the machine model.
  if (hasInstrItineraries() || MI->isBundle() ||
      (!hasInstrSchedModel() && !UseDefaultDefLatency))
    return TII->getInstrLatency(&InstrItins, *MI);

  if (hasInstrSchedModel()) {
    const MCSchedClassDesc *SCDesc = resolveSchedClass(MI);
    if (SCDesc->isValid())
      return computeInstrLatency(*SCDesc);
  }
  return TII->defaultDefLatency(SchedModel, *MI);
}
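
/// Output dependency latency of a pair of defs of the same register.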
unsigned TargetSchedModel::
computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
                     const MachineInstr *DepMI) const {
  if (!SchedModel.isOutOfOrder())
    return 1;

  // Out-of-order processors can dispatch WAW dependencies in the same cycle.

  // Treat predication as a data dependency for out-of-order cpus: a predicated
  // def still depends on the previous value of the register.
  //
  // TODO: The following hack exists because predication passes do not
  // correctly append imp-use operands, and readsReg() strangely returns false
  // for predicated defs.
  Register Reg = DefMI->getOperand(DefOperIdx).getReg();
  const MachineFunction &MF = *DefMI->getMF();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!DepMI->readsRegister(Reg, TRI) && TII->isPredicated(*DepMI))
    return computeInstrLatency(DefMI);

  // If we have a per-operand scheduling model, check if this def is writing
  // an unbuffered resource. If so, it is treated like an in-order cpu.
  if (hasInstrSchedModel()) {
    const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
    if (SCDesc->isValid()) {
      for (const MCWriteProcResEntry *PRI = STI->getWriteProcResBegin(SCDesc),
             *PRE = STI->getWriteProcResEnd(SCDesc); PRI != PRE; ++PRI) {
        if (!SchedModel.getProcResource(PRI->ProcResourceIdx)->BufferSize)
          return 1;
      }
    }
  }
  return 0;
}
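
/// Compute the reciprocal throughput of the given instruction.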
double
TargetSchedModel::computeReciprocalThroughput(const MachineInstr *MI) const {
  if (hasInstrItineraries()) {
    unsigned SchedClass = MI->getDesc().getSchedClass();
    return MCSchedModel::getReciprocalThroughput(SchedClass,
                                                 *getInstrItineraries());
  }

  if (hasInstrSchedModel())
    return MCSchedModel::getReciprocalThroughput(*STI, *resolveSchedClass(MI));

  return 0.0;
}

double
TargetSchedModel::computeReciprocalThroughput(unsigned Opcode) const {
  unsigned SchedClass = TII->get(Opcode).getSchedClass();
  if (hasInstrItineraries())
    return MCSchedModel::getReciprocalThroughput(SchedClass,
                                                 *getInstrItineraries());
  if (hasInstrSchedModel()) {
    const MCSchedClassDesc &SCDesc = *SchedModel.getSchedClassDesc(SchedClass);
    if (SCDesc.isValid() && !SCDesc.isVariant())
      return MCSchedModel::getReciprocalThroughput(*STI, SCDesc);
  }

  return 0.0;
}

double
TargetSchedModel::computeReciprocalThroughput(const MCInst &MI) const {
  if (hasInstrSchedModel())
    return SchedModel.getReciprocalThroughput(*STI, *TII, MI);
  return computeReciprocalThroughput(MI.getOpcode());
}

bool TargetSchedModel::enableIntervals() const {
  if (ForceEnableIntervals)
    return true;

  return SchedModel.EnableIntervals;
}
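
// Illustrative usage (not part of the original file): a CodeGen pass that owns
// a TargetSchedModel typically initializes it from the subtarget and then
// queries per-instruction numbers. A minimal sketch, assuming `MF` is the
// MachineFunction being processed and `MI` is one of its instructions:
//
//   TargetSchedModel SchedModel;
//   SchedModel.init(&MF.getSubtarget());
//   unsigned Latency = SchedModel.computeInstrLatency(&MI);
//   unsigned MicroOps = SchedModel.getNumMicroOps(&MI);
//   double RThroughput = SchedModel.computeReciprocalThroughput(&MI);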