LLVM: lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp Source File

//===-- X86EncodingOptimization.cpp - X86 Encoding optimization ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-Exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the X86 encoding optimization.
//
//===----------------------------------------------------------------------===//

#include "X86EncodingOptimization.h"
#include "X86BaseInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

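// Try to replace a 3-byte (0xC4) VEX prefix with the 2-byte (0xC5) form,
// either by switching to an equivalent opcode or by commuting the source
// operands.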

bool X86::optimizeInstFromVEX3ToVEX2(MCInst &MI, const MCInstrDesc &Desc) {
  unsigned OpIdx1, OpIdx2;
  unsigned Opcode = MI.getOpcode();
  unsigned NewOpc = 0;
#define FROM_TO(FROM, TO, IDX1, IDX2) \
  case X86::FROM: \
    NewOpc = X86::TO; \
    OpIdx1 = IDX1; \
    OpIdx2 = IDX2; \
    break;
#define TO_REV(FROM) FROM_TO(FROM, FROM##_REV, 0, 1)
  switch (Opcode) {
  default: {
    // For a commutable VEX arithmetic instruction, swapping the two source
    // registers may allow the shorter two-byte VEX prefix to be used.
    uint64_t TSFlags = Desc.TSFlags;
    if (!Desc.isCommutable() || (TSFlags & X86II::EncodingMask) != X86II::VEX ||
        (TSFlags & X86II::OpMapMask) != X86II::TB ||
        (TSFlags & X86II::FormMask) != X86II::MRMSrcReg ||
        (TSFlags & X86II::REX_W) || !(TSFlags & X86II::VEX_4V) ||
        MI.getNumOperands() != 3)
      return false;
    // These two are not truly commutable.
    if (Opcode == X86::VMOVHLPSrr || Opcode == X86::VUNPCKHPDrr)
      return false;
    OpIdx1 = 1;
    OpIdx2 = 2;
    break;
  }
  case X86::VCMPPDrri:
  case X86::VCMPPDYrri:
  case X86::VCMPPSrri:
  case X86::VCMPPSYrri:
  case X86::VCMPSDrri:
  case X86::VCMPSSrri: {
    switch (MI.getOperand(3).getImm() & 0x7) {
    default:
      return false;
    case 0x00:
    case 0x03:
    case 0x04:
    case 0x07:
      OpIdx1 = 1;
      OpIdx2 = 2;
      break;
    }
    break;
  }
  // The reversed (store-style) forms below put an extended source register in
  // the ModRM.reg field, where the two-byte VEX prefix's VEX.R bit can still
  // encode it.
  FROM_TO(VMOVZPQILo2PQIrr, VMOVPQI2QIrr, 0, 1)
  TO_REV(VMOVAPDrr)
  TO_REV(VMOVAPDYrr)
  TO_REV(VMOVAPSrr)
  TO_REV(VMOVAPSYrr)
  TO_REV(VMOVDQArr)
  TO_REV(VMOVDQAYrr)
  TO_REV(VMOVDQUrr)
  TO_REV(VMOVDQUYrr)
  TO_REV(VMOVUPDrr)
  TO_REV(VMOVUPDYrr)
  TO_REV(VMOVUPSrr)
  TO_REV(VMOVUPSYrr)
#undef TO_REV
#define TO_REV(FROM) FROM_TO(FROM, FROM##_REV, 0, 2)
  TO_REV(VMOVSDrr)
  TO_REV(VMOVSSrr)
#undef TO_REV
#undef FROM_TO
  }
  if (X86II::isX86_64ExtendedReg(MI.getOperand(OpIdx1).getReg()) ||
      !X86II::isX86_64ExtendedReg(MI.getOperand(OpIdx2).getReg()))
    return false;
  if (NewOpc)
    MI.setOpcode(NewOpc);
  else
    std::swap(MI.getOperand(OpIdx1), MI.getOperand(OpIdx2));
  return true;
}

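// A shift or rotate by an immediate of 1 has a dedicated opcode with the
// count implicit, so the $1 operand can be dropped.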

bool X86::optimizeShiftRotateWithImmediateOne(MCInst &MI) {
  unsigned NewOpc;
#define TO_IMM1(FROM) \
  case X86::FROM##i: \
    NewOpc = X86::FROM##1; \
    break; \
  case X86::FROM##i_EVEX: \
    NewOpc = X86::FROM##1_EVEX; \
    break; \
  case X86::FROM##i_ND: \
    NewOpc = X86::FROM##1_ND; \
    break;
  switch (MI.getOpcode()) {
  default:
    return false;
  // (Additional TO_IMM1 case entries are not reproduced in this listing.)
#undef TO_IMM1
#define TO_IMM1(FROM) \
  case X86::FROM##i: \
    NewOpc = X86::FROM##1; \
    break; \
  case X86::FROM##i_EVEX: \
    NewOpc = X86::FROM##1_EVEX; \
    break; \
  case X86::FROM##i_NF: \
    NewOpc = X86::FROM##1_NF; \
    break; \
  case X86::FROM##i_ND: \
    NewOpc = X86::FROM##1_ND; \
    break; \
  case X86::FROM##i_NF_ND: \
    NewOpc = X86::FROM##1_NF_ND; \
    break;
  // (Additional TO_IMM1 case entries are not reproduced in this listing.)
#undef TO_IMM1
  }
  MCOperand &LastOp = MI.getOperand(MI.getNumOperands() - 1);
  if (!LastOp.isImm() || LastOp.getImm() != 1)
    return false;
  MI.setOpcode(NewOpc);
  MI.erase(&LastOp);
  return true;
}

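// AVX-512 VPCMP with predicate 0 (EQ) or 6 (signed greater-than) can use the
// shorter dedicated VPCMPEQ/VPCMPGT opcodes.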

bool X86::optimizeVPCMPWithImmediateOneOrSix(MCInst &MI) {
  unsigned Opc1;
  unsigned Opc2;
#define FROM_TO(FROM, TO1, TO2) \
  case X86::FROM: \
    Opc1 = X86::TO1; \
    Opc2 = X86::TO2; \
    break;
  switch (MI.getOpcode()) {
  default:
    return false;
  FROM_TO(VPCMPBZ128rmi, VPCMPEQBZ128rm, VPCMPGTBZ128rm)
  FROM_TO(VPCMPBZ128rmik, VPCMPEQBZ128rmk, VPCMPGTBZ128rmk)
  FROM_TO(VPCMPBZ128rri, VPCMPEQBZ128rr, VPCMPGTBZ128rr)
  FROM_TO(VPCMPBZ128rrik, VPCMPEQBZ128rrk, VPCMPGTBZ128rrk)
  FROM_TO(VPCMPBZ256rmi, VPCMPEQBZ256rm, VPCMPGTBZ256rm)
  FROM_TO(VPCMPBZ256rmik, VPCMPEQBZ256rmk, VPCMPGTBZ256rmk)
  FROM_TO(VPCMPBZ256rri, VPCMPEQBZ256rr, VPCMPGTBZ256rr)
  FROM_TO(VPCMPBZ256rrik, VPCMPEQBZ256rrk, VPCMPGTBZ256rrk)
  FROM_TO(VPCMPBZrmi, VPCMPEQBZrm, VPCMPGTBZrm)
  FROM_TO(VPCMPBZrmik, VPCMPEQBZrmk, VPCMPGTBZrmk)
  FROM_TO(VPCMPBZrri, VPCMPEQBZrr, VPCMPGTBZrr)
  FROM_TO(VPCMPBZrrik, VPCMPEQBZrrk, VPCMPGTBZrrk)
  FROM_TO(VPCMPDZ128rmi, VPCMPEQDZ128rm, VPCMPGTDZ128rm)
  FROM_TO(VPCMPDZ128rmbi, VPCMPEQDZ128rmb, VPCMPGTDZ128rmb)
  FROM_TO(VPCMPDZ128rmbik, VPCMPEQDZ128rmbk, VPCMPGTDZ128rmbk)
  FROM_TO(VPCMPDZ128rmik, VPCMPEQDZ128rmk, VPCMPGTDZ128rmk)
  FROM_TO(VPCMPDZ128rri, VPCMPEQDZ128rr, VPCMPGTDZ128rr)
  FROM_TO(VPCMPDZ128rrik, VPCMPEQDZ128rrk, VPCMPGTDZ128rrk)
  FROM_TO(VPCMPDZ256rmi, VPCMPEQDZ256rm, VPCMPGTDZ256rm)
  FROM_TO(VPCMPDZ256rmbi, VPCMPEQDZ256rmb, VPCMPGTDZ256rmb)
  FROM_TO(VPCMPDZ256rmbik, VPCMPEQDZ256rmbk, VPCMPGTDZ256rmbk)
  FROM_TO(VPCMPDZ256rmik, VPCMPEQDZ256rmk, VPCMPGTDZ256rmk)
  FROM_TO(VPCMPDZ256rri, VPCMPEQDZ256rr, VPCMPGTDZ256rr)
  FROM_TO(VPCMPDZ256rrik, VPCMPEQDZ256rrk, VPCMPGTDZ256rrk)
  FROM_TO(VPCMPDZrmi, VPCMPEQDZrm, VPCMPGTDZrm)
  FROM_TO(VPCMPDZrmbi, VPCMPEQDZrmb, VPCMPGTDZrmb)
  FROM_TO(VPCMPDZrmbik, VPCMPEQDZrmbk, VPCMPGTDZrmbk)
  FROM_TO(VPCMPDZrmik, VPCMPEQDZrmk, VPCMPGTDZrmk)
  FROM_TO(VPCMPDZrri, VPCMPEQDZrr, VPCMPGTDZrr)
  FROM_TO(VPCMPDZrrik, VPCMPEQDZrrk, VPCMPGTDZrrk)
  FROM_TO(VPCMPQZ128rmi, VPCMPEQQZ128rm, VPCMPGTQZ128rm)
  FROM_TO(VPCMPQZ128rmbi, VPCMPEQQZ128rmb, VPCMPGTQZ128rmb)
  FROM_TO(VPCMPQZ128rmbik, VPCMPEQQZ128rmbk, VPCMPGTQZ128rmbk)
  FROM_TO(VPCMPQZ128rmik, VPCMPEQQZ128rmk, VPCMPGTQZ128rmk)
  FROM_TO(VPCMPQZ128rri, VPCMPEQQZ128rr, VPCMPGTQZ128rr)
  FROM_TO(VPCMPQZ128rrik, VPCMPEQQZ128rrk, VPCMPGTQZ128rrk)
  FROM_TO(VPCMPQZ256rmi, VPCMPEQQZ256rm, VPCMPGTQZ256rm)
  FROM_TO(VPCMPQZ256rmbi, VPCMPEQQZ256rmb, VPCMPGTQZ256rmb)
  FROM_TO(VPCMPQZ256rmbik, VPCMPEQQZ256rmbk, VPCMPGTQZ256rmbk)
  FROM_TO(VPCMPQZ256rmik, VPCMPEQQZ256rmk, VPCMPGTQZ256rmk)
  FROM_TO(VPCMPQZ256rri, VPCMPEQQZ256rr, VPCMPGTQZ256rr)
  FROM_TO(VPCMPQZ256rrik, VPCMPEQQZ256rrk, VPCMPGTQZ256rrk)
  FROM_TO(VPCMPQZrmi, VPCMPEQQZrm, VPCMPGTQZrm)
  FROM_TO(VPCMPQZrmbi, VPCMPEQQZrmb, VPCMPGTQZrmb)
  FROM_TO(VPCMPQZrmbik, VPCMPEQQZrmbk, VPCMPGTQZrmbk)
  FROM_TO(VPCMPQZrmik, VPCMPEQQZrmk, VPCMPGTQZrmk)
  FROM_TO(VPCMPQZrri, VPCMPEQQZrr, VPCMPGTQZrr)
  FROM_TO(VPCMPQZrrik, VPCMPEQQZrrk, VPCMPGTQZrrk)
  FROM_TO(VPCMPWZ128rmi, VPCMPEQWZ128rm, VPCMPGTWZ128rm)
  FROM_TO(VPCMPWZ128rmik, VPCMPEQWZ128rmk, VPCMPGTWZ128rmk)
  FROM_TO(VPCMPWZ128rri, VPCMPEQWZ128rr, VPCMPGTWZ128rr)
  FROM_TO(VPCMPWZ128rrik, VPCMPEQWZ128rrk, VPCMPGTWZ128rrk)
  FROM_TO(VPCMPWZ256rmi, VPCMPEQWZ256rm, VPCMPGTWZ256rm)
  FROM_TO(VPCMPWZ256rmik, VPCMPEQWZ256rmk, VPCMPGTWZ256rmk)
  FROM_TO(VPCMPWZ256rri, VPCMPEQWZ256rr, VPCMPGTWZ256rr)
  FROM_TO(VPCMPWZ256rrik, VPCMPEQWZ256rrk, VPCMPGTWZ256rrk)
  FROM_TO(VPCMPWZrmi, VPCMPEQWZrm, VPCMPGTWZrm)
  FROM_TO(VPCMPWZrmik, VPCMPEQWZrmk, VPCMPGTWZrmk)
  FROM_TO(VPCMPWZrri, VPCMPEQWZrr, VPCMPGTWZrr)
  FROM_TO(VPCMPWZrrik, VPCMPEQWZrrk, VPCMPGTWZrrk)
#undef FROM_TO
  }
  MCOperand &LastOp = MI.getOperand(MI.getNumOperands() - 1);
  int64_t Imm = LastOp.getImm();
  unsigned NewOpc;
  if (Imm == 0)
    NewOpc = Opc1;
  else if (Imm == 6)
    NewOpc = Opc2;
  else
    return false;
  MI.setOpcode(NewOpc);
  MI.erase(&LastOp);
  return true;
}

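// A sign-extension from the accumulator into the next wider accumulator can
// be encoded as CBW/CWDE/CDQE, which need no ModRM byte.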

bool X86::optimizeMOVSX(MCInst &MI) {
  unsigned NewOpc;
#define FROM_TO(FROM, TO, R0, R1) \
  case X86::FROM: \
    if (MI.getOperand(0).getReg() != X86::R0 || \
        MI.getOperand(1).getReg() != X86::R1) \
      return false; \
    NewOpc = X86::TO; \
    break;
  switch (MI.getOpcode()) {
  default:
    return false;
  FROM_TO(MOVSX16rr8, CBW, AX, AL)
  FROM_TO(MOVSX32rr16, CWDE, EAX, AX)
  FROM_TO(MOVSX64rr32, CDQE, RAX, EAX)
#undef FROM_TO
  }
  MI.clear();
  MI.setOpcode(NewOpc);
  return true;
}

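// Outside 64-bit mode, INC/DEC r16/r32 have one-byte short forms (the _alt
// opcodes); in 64-bit mode those encodings are REX prefixes.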

bool X86::optimizeINCDEC(MCInst &MI, bool In64BitMode) {
  if (In64BitMode)
    return false;
  unsigned NewOpc;
#define FROM_TO(FROM, TO) \
  case X86::FROM: \
    NewOpc = X86::TO; \
    break;
  switch (MI.getOpcode()) {
  default:
    return false;
  FROM_TO(DEC16r, DEC16r_alt)
  FROM_TO(DEC32r, DEC32r_alt)
  FROM_TO(INC16r, INC16r_alt)
  FROM_TO(INC32r, INC32r_alt)
  }
  MI.setOpcode(NewOpc);
  return true;
}

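// The fixed-register forms used below can only name the accumulator
// (AL/AX/EAX/RAX).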

static bool isARegister(MCRegister Reg) {
  return Reg == X86::AL || Reg == X86::AX || Reg == X86::EAX || Reg == X86::RAX;
}

/// Simplify things like MOV32rm to MOV32o32a.
bool X86::optimizeMOV(MCInst &MI, bool In64BitMode) {
  // These short moffs forms are only used outside 64-bit mode.
  if (In64BitMode)
    return false;
  unsigned NewOpc;
  switch (MI.getOpcode()) {
  default:
    return false;
  FROM_TO(MOV8mr_NOREX, MOV8o32a)
  FROM_TO(MOV8mr, MOV8o32a)
  FROM_TO(MOV8rm_NOREX, MOV8ao32)
  FROM_TO(MOV8rm, MOV8ao32)
  FROM_TO(MOV16mr, MOV16o32a)
  FROM_TO(MOV16rm, MOV16ao32)
  FROM_TO(MOV32mr, MOV32o32a)
  FROM_TO(MOV32rm, MOV32ao32)
  }

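  // The *mr (store) forms put the five memory operands first with the stored
  // register last; the *rm (load) forms put the destination register first.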

  bool IsStore = MI.getOperand(0).isReg() && MI.getOperand(1).isReg();
  unsigned AddrBase = IsStore;
  unsigned RegOp = IsStore ? 0 : 5;
  unsigned AddrOp = AddrBase + 3;
  MCRegister Reg = MI.getOperand(RegOp).getReg();
  if (!isARegister(Reg))
    return false;

  // The short forms only encode an absolute disp32 with no base or index
  // register, so check that the address is a plain absolute one.
  bool Absolute = true;
  if (MI.getOperand(AddrOp).isExpr()) {
    const MCExpr *MCE = MI.getOperand(AddrOp).getExpr();
    const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(MCE);
    if (SRE && SRE->getSpecifier() == X86::S_TLVP)
      Absolute = false;
  }
  if (Absolute && (MI.getOperand(AddrBase + X86::AddrBaseReg).getReg() ||
                   MI.getOperand(AddrBase + X86::AddrScaleAmt).getImm() != 1 ||
                   MI.getOperand(AddrBase + X86::AddrIndexReg).getReg()))
    return false;

  MCOperand Saved = MI.getOperand(AddrOp);
  MCOperand Seg = MI.getOperand(AddrBase + X86::AddrSegmentReg);
  MI.clear();
  MI.setOpcode(NewOpc);
  MI.addOperand(Saved);
  MI.addOperand(Seg);
  return true;
}

/// Simplify FOO $imm, %{al,ax,eax,rax} to FOO $imm, for instruction with
/// a short fixed-register form.
static bool optimizeToFixedRegisterForm(MCInst &MI) {
  unsigned NewOpc;
  switch (MI.getOpcode()) {
  default:
    return false;
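  // Each entry maps a register+immediate ALU form to the accumulator-implicit
  // short form that carries only the immediate.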

  FROM_TO(ADC8ri, ADC8i8)
  FROM_TO(ADC16ri, ADC16i16)
  FROM_TO(ADC32ri, ADC32i32)
  FROM_TO(ADC64ri32, ADC64i32)
  FROM_TO(ADD8ri, ADD8i8)
  FROM_TO(ADD16ri, ADD16i16)
  FROM_TO(ADD32ri, ADD32i32)
  FROM_TO(ADD64ri32, ADD64i32)
  FROM_TO(AND8ri, AND8i8)
  FROM_TO(AND16ri, AND16i16)
  FROM_TO(AND32ri, AND32i32)
  FROM_TO(AND64ri32, AND64i32)
  FROM_TO(CMP8ri, CMP8i8)
  FROM_TO(CMP16ri, CMP16i16)
  FROM_TO(CMP32ri, CMP32i32)
  FROM_TO(CMP64ri32, CMP64i32)
  FROM_TO(OR8ri, OR8i8)
  FROM_TO(OR16ri, OR16i16)
  FROM_TO(OR32ri, OR32i32)
  FROM_TO(OR64ri32, OR64i32)
  FROM_TO(SBB8ri, SBB8i8)
  FROM_TO(SBB16ri, SBB16i16)
  FROM_TO(SBB32ri, SBB32i32)
  FROM_TO(SBB64ri32, SBB64i32)
  FROM_TO(SUB8ri, SUB8i8)
  FROM_TO(SUB16ri, SUB16i16)
  FROM_TO(SUB32ri, SUB32i32)
  FROM_TO(SUB64ri32, SUB64i32)
  FROM_TO(TEST8ri, TEST8i8)
  FROM_TO(TEST16ri, TEST16i16)
  FROM_TO(TEST32ri, TEST32i32)
  FROM_TO(TEST64ri32, TEST64i32)
  FROM_TO(XOR8ri, XOR8i8)
  FROM_TO(XOR16ri, XOR16i16)
  FROM_TO(XOR32ri, XOR32i32)
  FROM_TO(XOR64ri32, XOR64i32)
  }
  // Check whether the destination register is the accumulator.
  MCRegister Reg = MI.getOperand(0).getReg();
  if (!isARegister(Reg))
    return false;

  // If so, keep only the immediate and switch to the fixed-register opcode.
  MCOperand Saved = MI.getOperand(MI.getNumOperands() - 1);
  MI.clear();
  MI.setOpcode(NewOpc);
  MI.addOperand(Saved);
  return true;
}

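// X86EncodingOptimizationForImmediate.def pairs each full-width-immediate
// opcode (LONG) with its sign-extended 8-bit-immediate counterpart (SHORT);
// the helpers below map between the two.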

unsigned X86::getOpcodeForShortImmediateForm(unsigned Opcode) {
#define ENTRY(LONG, SHORT) \
  case X86::LONG: \
    return X86::SHORT;
  switch (Opcode) {
  default:
    return Opcode;
#include "X86EncodingOptimizationForImmediate.def"
  }
}

unsigned X86::getOpcodeForLongImmediateForm(unsigned Opcode) {
#define ENTRY(LONG, SHORT) \
  case X86::SHORT: \
    return X86::LONG;
  switch (Opcode) {
  default:
    return Opcode;
#include "X86EncodingOptimizationForImmediate.def"
  }
}

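// Switch to the sign-extended 8-bit immediate encoding when the immediate
// operand fits in 8 bits.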

static bool optimizeToShortImmediateForm(MCInst &MI) {
  unsigned NewOpc;
#define ENTRY(LONG, SHORT) \
  case X86::LONG: \
    NewOpc = X86::SHORT; \
    break;
  switch (MI.getOpcode()) {
  default:
    return false;
#include "X86EncodingOptimizationForImmediate.def"
  }
  unsigned SkipOperands = X86::isCCMPCC(MI.getOpcode()) ? 2 : 0;
  MCOperand &LastOp = MI.getOperand(MI.getNumOperands() - 1 - SkipOperands);
  if (LastOp.isExpr()) {
    const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(LastOp.getExpr());
    if (!SRE || SRE->getSpecifier() != X86::S_ABS8)
      return false;
  } else if (LastOp.isImm()) {
    if (!isInt<8>(LastOp.getImm()))
      return false;
  }
  MI.setOpcode(NewOpc);
  return true;
}

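// Public entry point: attempt both rewrites; either one may apply.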

bool X86::optimizeToFixedRegisterOrShortImmediateForm(MCInst &MI) {
  bool ShortImm = optimizeToShortImmediateForm(MI);
  bool FixedReg = optimizeToFixedRegisterForm(MI);
  return ShortImm || FixedReg;
}
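
These entry points are declared in X86EncodingOptimization.h and are run on an MCInst shortly before encoding, typically from the X86 MC lowering and assembly-parsing paths. The sketch below shows how one of them could be driven by hand; the include paths and the standalone driver function are assumptions for illustration, not part of this file.

#include "MCTargetDesc/X86EncodingOptimization.h" // assumed include path
#include "MCTargetDesc/X86MCTargetDesc.h"         // assumed: provides the X86:: register/opcode enums
#include "llvm/MC/MCInst.h"

using namespace llvm;

// Hypothetical helper: shrink "addl $200, %eax" (ADD32ri) before emission.
static void shrinkAddExample() {
  MCInst MI;
  MI.setOpcode(X86::ADD32ri);
  MI.addOperand(MCOperand::createReg(X86::EAX)); // destination
  MI.addOperand(MCOperand::createReg(X86::EAX)); // source (tied to destination)
  MI.addOperand(MCOperand::createImm(200));
  // 200 does not fit in a signed 8-bit immediate, so the short-immediate
  // rewrite fails, but the fixed-register rewrite turns this into the
  // accumulator form ADD32i32 whose only operand is the immediate.
  bool Changed = X86::optimizeToFixedRegisterOrShortImmediateForm(MI);
  (void)Changed;
}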
