LLVM: lib/Target/AArch64/AArch64AdvSIMDScalarPass.cpp Source File
//===-- AArch64AdvSIMDScalarPass.cpp - AdvSIMD Scalar Optimization Pass --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// When profitable, replace GPR targeting i64 instructions with their
// AdvSIMD scalar equivalents. Generally speaking, "profitable" means
// switching to the AdvSIMD form reduces the number of cross-register-file
// copies (i.e. GPR <-> FPR moves) that must be inserted.
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "AArch64InstrInfo.h"
#include "AArch64RegisterInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "aarch64-simd-scalar"

// Allow forcing all i64 AdvSIMD scalar instructions to be used, whether or
// not the profitability heuristic would pick them. Primarily for testing.
static cl::opt<bool>
    TransformAll("aarch64-simd-scalar-force-all",
                 cl::desc("Force use of AdvSIMD scalar instructions everywhere"),
                 cl::init(false), cl::Hidden);

STATISTIC(NumScalarInsnsUsed, "Number of scalar instructions used");
STATISTIC(NumCopiesDeleted, "Number of cross-class copies deleted");
STATISTIC(NumCopiesInserted, "Number of cross-class copies inserted");

#define AARCH64_ADVSIMD_NAME "AdvSIMD Scalar Operation Optimization"
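// Usage note (hypothetical invocation): because TransformAll is a cl::opt,
// the forcing behavior can be requested from tools such as llc when this
// pass is enabled, e.g.
//   llc -mtriple=aarch64 -aarch64-simd-scalar-force-all ...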
namespace {
class AArch64AdvSIMDScalar : public MachineFunctionPass {
  MachineRegisterInfo *MRI;
  const TargetInstrInfo *TII;

private:
  // isProfitableToTransform - Predicate function to determine whether an
  // instruction should be transformed to its equivalent AdvSIMD scalar
  // instruction. "add Xd, Xn, Xm" ==> "add Dd, Da, Db", for example.
  bool isProfitableToTransform(const MachineInstr &MI) const;

  // transformInstruction - Perform the transformation of an instruction
  // to its equivalent AdvSIMD scalar instruction. Generally this will involve
  // inserting copies first.
  void transformInstruction(MachineInstr &MI);

  // processMachineBasicBlock - Main optimization loop.
  bool processMachineBasicBlock(MachineBasicBlock *MBB);

public:
  static char ID; // Pass identification, replacement for typeid.
  explicit AArch64AdvSIMDScalar() : MachineFunctionPass(ID) {
    initializeAArch64AdvSIMDScalarPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &F) override;

  StringRef getPassName() const override { return AARCH64_ADVSIMD_NAME; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};
char AArch64AdvSIMDScalar::ID = 0;
} // end anonymous namespace

INITIALIZE_PASS(AArch64AdvSIMDScalar, "aarch64-simd-scalar",
                AARCH64_ADVSIMD_NAME, false, false)

// Check whether a register is in the given class, accounting for subregister
// references on virtual registers.
static bool isGPR64(unsigned Reg, unsigned SubReg,
                    const MachineRegisterInfo *MRI) {
  // A subregister reference can't name a full 64-bit GPR.
  if (SubReg)
    return false;
  if (Register::isVirtualRegister(Reg))
    return MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::GPR64RegClass);
  return AArch64::GPR64RegClass.contains(Reg);
}

static bool isFPR64(unsigned Reg, unsigned SubReg,
                    const MachineRegisterInfo *MRI) {
  if (Register::isVirtualRegister(Reg))
    return (MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::FPR64RegClass) &&
            SubReg == 0) ||
           (MRI->getRegClass(Reg)->hasSuperClassEq(&AArch64::FPR128RegClass) &&
            SubReg == AArch64::dsub);
  return (AArch64::FPR64RegClass.contains(Reg) && SubReg == 0) ||
         (AArch64::FPR128RegClass.contains(Reg) && SubReg == AArch64::dsub);
}
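// Note: AArch64::dsub is the subregister index naming the low 64-bit D
// register inside a 128-bit Q register, so an FPR128 accessed via dsub is
// just as usable as a plain FPR64 for the scalar forms below.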
124
125
126
131
132 if (MI->getOpcode() == AArch64::FMOVDXr ||
133 MI->getOpcode() == AArch64::FMOVXDr)
134 return &MI->getOperand(1);
135
136
137 if (MI->getOpcode() == AArch64::UMOVvi64 && MI->getOperand(2).getImm() == 0) {
138 SubReg = AArch64::dsub;
139 return &MI->getOperand(1);
140 }
141
142
143 if (MI->getOpcode() == AArch64::COPY) {
144 if (isFPR64(MI->getOperand(0).getReg(), MI->getOperand(0).getSubReg(),
146 isGPR64(MI->getOperand(1).getReg(), MI->getOperand(1).getSubReg(), MRI))
147 return &MI->getOperand(1);
148 if (isGPR64(MI->getOperand(0).getReg(), MI->getOperand(0).getSubReg(),
150 isFPR64(MI->getOperand(1).getReg(), MI->getOperand(1).getSubReg(),
152 SubReg = MI->getOperand(1).getSubReg();
153 return &MI->getOperand(1);
154 }
155 }
156
157
158 return nullptr;
159}

// getTransformOpcode - For any opcode for which there is an AdvSIMD equivalent
// that we're considering transforming to, return that AdvSIMD opcode. For all
// others, return the original opcode.
static unsigned getTransformOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    break;
  // FIXME: Lots more possibilities.
  case AArch64::ADDXrr:
    return AArch64::ADDv1i64;
  case AArch64::SUBXrr:
    return AArch64::SUBv1i64;
  case AArch64::ANDXrr:
    return AArch64::ANDv8i8;
  case AArch64::EORXrr:
    return AArch64::EORv8i8;
  case AArch64::ORRXrr:
    return AArch64::ORRv8i8;
  }
  // No AdvSIMD equivalent, so just return the original opcode.
  return Opc;
}
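// Illustrative example of the mapping above (hypothetical registers): the
// integer form
//   add x0, x1, x2      // ADDXrr, executes in the integer register file
// becomes the AdvSIMD scalar form
//   add d0, d1, d2      // ADDv1i64, executes entirely in the FP/SIMD file
// The bitwise ops have no 1x64-bit vector encoding, so they map to the
// 8x8-bit forms (ANDv8i8 etc.), which compute the same 64 result bits.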

static bool isTransformable(const MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  return Opc != getTransformOpcode(Opc);
}

// isProfitableToTransform - Predicate function to determine whether an
// instruction should be transformed to its equivalent AdvSIMD scalar
// instruction. "add Xd, Xn, Xm" ==> "add Dd, Da, Db", for example.
bool AArch64AdvSIMDScalar::isProfitableToTransform(
    const MachineInstr &MI) const {
  // If this instruction isn't eligible to be transformed (no SIMD equivalent),
  // early exit, since that's the common case.
  if (!isTransformable(MI))
    return false;

  // Count the number of copies we'll need to add and approximate the number
  // of copies that a transform will enable us to remove.
  unsigned NumNewCopies = 3;
  unsigned NumRemovableCopies = 0;

  Register OrigSrc0 = MI.getOperand(1).getReg();
  Register OrigSrc1 = MI.getOperand(2).getReg();
  unsigned SubReg0;
  unsigned SubReg1;
  if (!MRI->def_empty(OrigSrc0)) {
    MachineRegisterInfo::def_instr_iterator Def =
        MRI->def_instr_begin(OrigSrc0);
    assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
    MachineOperand *MOSrc0 = getSrcFromCopy(&*Def, MRI, SubReg0);
    // If the source was from a copy, we don't need to insert a new copy.
    if (MOSrc0)
      --NumNewCopies;
    // If there are no other users of the original source, we can delete
    // that copy instruction entirely.
    if (MOSrc0 && MRI->hasOneNonDBGUse(OrigSrc0))
      ++NumRemovableCopies;
  }
  if (!MRI->def_empty(OrigSrc1)) {
    MachineRegisterInfo::def_instr_iterator Def =
        MRI->def_instr_begin(OrigSrc1);
    assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
    MachineOperand *MOSrc1 = getSrcFromCopy(&*Def, MRI, SubReg1);
    if (MOSrc1)
      --NumNewCopies;
    // If there are no other users of the original source, we can delete
    // that copy instruction entirely.
    if (MOSrc1 && MRI->hasOneNonDBGUse(OrigSrc1))
      ++NumRemovableCopies;
  }

  // If any of the uses of the original instruction is a cross-class copy,
  // that's a copy that will be removable if we transform. Likewise, if
  // any of the uses is a transformable instruction, it's likely the transforms
  // will chain, enabling us to save a copy there, too.
  Register Dst = MI.getOperand(0).getReg();
  bool AllUsesAreCopies = true;
  for (MachineRegisterInfo::use_instr_nodbg_iterator
           Use = MRI->use_instr_nodbg_begin(Dst),
           E = MRI->use_instr_nodbg_end();
       Use != E; ++Use) {
    unsigned SubReg;
    if (getSrcFromCopy(&*Use, MRI, SubReg) || isTransformable(*Use))
      ++NumRemovableCopies;
    // If the use is an INSERT_SUBREG or a lane insert, it can consume the
    // FPR64 directly, so it doesn't force a copy back to GPR64.
    else if (Use->getOpcode() == AArch64::INSERT_SUBREG ||
             Use->getOpcode() == AArch64::INSvi64gpr)
      ;
    else
      AllUsesAreCopies = false;
  }
  // If all of the uses of the original destination register are copies to
  // FPR64, then we won't end up having a new copy back to GPR64 either.
  if (AllUsesAreCopies)
    --NumNewCopies;

  // If a transform will not increase the number of cross-class copies
  // required, it's a win.
  if (NumNewCopies <= NumRemovableCopies)
    return true;

  // Finally, even if we otherwise wouldn't transform, check if we're forcing
  // transformation of everything.
  return TransformAll;
}
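// Profitability bookkeeping above, in short: a transformed instruction needs
// at most three new cross-class copies (one per source operand, one for the
// result). Each source that already arrives through a GPR<->FPR copy avoids a
// new copy, and each such copy with no other users becomes deletable, as does
// the result copy when every use of the destination is itself a copy or can
// consume the FPR64 directly. The transform fires when it is at least
// copy-neutral: NumNewCopies <= NumRemovableCopies.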

// insertCopy - Build and insert a GPR64 <--> FPR64 COPY before MI, updating
// the inserted-copy statistic.
static MachineInstr *insertCopy(const TargetInstrInfo *TII, MachineInstr &MI,
                                unsigned Dst, unsigned Src, bool IsKill) {
  MachineInstrBuilder MIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                    TII->get(AArch64::COPY), Dst)
                                .addReg(Src, getKillRegState(IsKill));
  LLVM_DEBUG(dbgs() << "    adding copy: " << *MIB);
  ++NumCopiesInserted;
  return MIB;
}
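// Note that the instruction built here is a generic COPY; it is lowered to a
// concrete cross-register-file move (e.g. an fmov) after register allocation,
// based on the register classes of Dst and Src.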

// transformInstruction - Perform the transformation of an instruction
// to its equivalent AdvSIMD scalar instruction. Generally this will involve
// inserting copies first.
void AArch64AdvSIMDScalar::transformInstruction(MachineInstr &MI) {
  LLVM_DEBUG(dbgs() << "Scalar transform: " << MI);

  MachineBasicBlock *MBB = MI.getParent();
  unsigned OldOpc = MI.getOpcode();
  unsigned NewOpc = getTransformOpcode(OldOpc);
  assert(OldOpc != NewOpc && "transform an instruction to itself?!");

  // Check if we need a copy for the source registers.
  Register OrigSrc0 = MI.getOperand(1).getReg();
  Register OrigSrc1 = MI.getOperand(2).getReg();
  unsigned Src0 = 0, SubReg0;
  unsigned Src1 = 0, SubReg1;
  bool KillSrc0 = false, KillSrc1 = false;
  if (!MRI->def_empty(OrigSrc0)) {
    MachineRegisterInfo::def_instr_iterator Def =
        MRI->def_instr_begin(OrigSrc0);
    assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
    MachineOperand *MOSrc0 = getSrcFromCopy(&*Def, MRI, SubReg0);
    if (MOSrc0) {
      Src0 = MOSrc0->getReg();
      KillSrc0 = MOSrc0->isKill();
      // Src0 is going to be reused, thus, it cannot be killed anymore.
      MOSrc0->setIsKill(false);
      // If there are no other users of the original source, we can delete
      // that copy instruction entirely.
      if (MRI->hasOneNonDBGUse(OrigSrc0)) {
        assert(MOSrc0 && "Can't delete copy w/o a valid original source!");
        Def->eraseFromParent();
        ++NumCopiesDeleted;
      }
    }
  }
  if (!MRI->def_empty(OrigSrc1)) {
    MachineRegisterInfo::def_instr_iterator Def =
        MRI->def_instr_begin(OrigSrc1);
    assert(std::next(Def) == MRI->def_instr_end() && "Multiple def in SSA!");
    MachineOperand *MOSrc1 = getSrcFromCopy(&*Def, MRI, SubReg1);
    if (MOSrc1) {
      Src1 = MOSrc1->getReg();
      KillSrc1 = MOSrc1->isKill();
      // Src1 is going to be reused, thus, it cannot be killed anymore.
      MOSrc1->setIsKill(false);
      // If there are no other users of the original source, we can delete
      // that copy instruction entirely.
      if (MRI->hasOneNonDBGUse(OrigSrc1)) {
        assert(MOSrc1 && "Can't delete copy w/o a valid original source!");
        Def->eraseFromParent();
        ++NumCopiesDeleted;
      }
    }
  }

  // If we weren't able to reference the original source directly, create a
  // copy.
  if (!Src0) {
    SubReg0 = 0;
    Src0 = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
    insertCopy(TII, MI, Src0, OrigSrc0, KillSrc0);
    KillSrc0 = true;
  }
  if (!Src1) {
    SubReg1 = 0;
    Src1 = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
    insertCopy(TII, MI, Src1, OrigSrc1, KillSrc1);
    KillSrc1 = true;
  }

  // Create a vreg for the destination.
  // FIXME: No need to do this if the ultimate user expects an FPR64.
  // Check for that and avoid the copy if possible.
  Register Dst = MRI->createVirtualRegister(&AArch64::FPR64RegClass);

  // For now, all of the new instructions have the same simple three-register
  // form, so no need to special case based on what instruction we're
  // building.
  BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(NewOpc), Dst)
      .addReg(Src0, getKillRegState(KillSrc0), SubReg0)
      .addReg(Src1, getKillRegState(KillSrc1), SubReg1);

  // Now copy the result of the instruction back into the original
  // destination.
  BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(AArch64::COPY),
          MI.getOperand(0).getReg())
      .addReg(Dst, getKillRegState(true));

  // Erase the old instruction.
  MI.eraseFromParent();

  ++NumScalarInsnsUsed;
}
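// Illustrative before/after of the transform (hypothetical virtual
// registers):
//
//   %1:fpr64 = ...
//   %2:gpr64 = COPY %1          ; cross-class copy feeding the add
//   %3:gpr64 = ADDXrr %2, %2
//   %4:fpr64 = COPY %3          ; result copied straight back to FPR
//
// becomes
//
//   %5:fpr64 = ADDv1i64 %1, %1  ; computed entirely in the FP/SIMD file
//   %3:gpr64 = COPY %5          ; result copy preserved for remaining users
//
// The now-dead source copies are deleted above when they have no other users.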

// processMachineBasicBlock - Main optimization loop.
bool AArch64AdvSIMDScalar::processMachineBasicBlock(MachineBasicBlock *MBB) {
  bool Changed = false;
  for (MachineInstr &MI : make_early_inc_range(*MBB)) {
    if (isProfitableToTransform(MI)) {
      transformInstruction(MI);
      Changed = true;
    }
  }
  return Changed;
}

// runOnMachineFunction - Pass entry point from PassManager.
bool AArch64AdvSIMDScalar::runOnMachineFunction(MachineFunction &mf) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "***** AArch64AdvSIMDScalar *****\n");

  if (skipFunction(mf.getFunction()))
    return false;

  MRI = &mf.getRegInfo();
  TII = mf.getSubtarget().getInstrInfo();

  // Just check things on a one-block-at-a-time basis.
  for (MachineBasicBlock &MBB : mf)
    if (processMachineBasicBlock(&MBB))
      Changed = true;
  return Changed;
}

// createAArch64AdvSIMDScalar - Factory function used by AArch64TargetMachine
// to add the pass to the pass manager.
FunctionPass *llvm::createAArch64AdvSIMDScalar() {
  return new AArch64AdvSIMDScalar();
}
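// A minimal sketch (assumed wiring, not part of this file) of how the factory
// is typically invoked from the AArch64 pass pipeline setup:
//
//   // In AArch64TargetMachine's pass configuration (names assumed):
//   if (EnableAdvSIMDScalar)
//     addPass(createAArch64AdvSIMDScalar());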