LLVM: lib/Target/X86/X86FixupVectorConstants.cpp Source File

//===-- X86FixupVectorConstants.cpp - optimize pool loads ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file examines all full size vector constant pool loads and attempts to
// replace them with smaller constant pool entries, including:
// * Converting AVX512 memory-fold instructions to their broadcast-fold form.
// * Using smaller (ideally scalar) broadcasts for splatted constants.
// * Using sign/zero extended loads for constants whose elements fit in a
//   narrower integer type.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrFoldTables.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineConstantPool.h"
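// For example, a full width load of a splat constant such as
//   vmovaps constant(%rip), %ymm0        # 32 byte constant pool entry
// can be rewritten as a scalar broadcast with a much smaller entry:
//   vbroadcastss constant(%rip), %ymm0   # 4 byte constant pool entry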

using namespace llvm;

#define DEBUG_TYPE "x86-fixup-vector-constants"

STATISTIC(NumInstChanges, "Number of instruction changes");

namespace {
class X86FixupVectorConstantsPass : public MachineFunctionPass {
public:
  static char ID;

  X86FixupVectorConstantsPass() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override {
    return "X86 Fixup Vector Constants";
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
  bool processInstruction(MachineFunction &MF, MachineBasicBlock &MBB,
                          MachineInstr &MI);

  // This pass runs after register allocation, so it shouldn't see any
  // virtual registers.
  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().setNoVRegs();
  }

private:
  const X86InstrInfo *TII = nullptr;
  const X86Subtarget *ST = nullptr;
  const MCSchedModel *SM = nullptr;
};
} // end anonymous namespace

char X86FixupVectorConstantsPass::ID = 0;

INITIALIZE_PASS(X86FixupVectorConstantsPass, DEBUG_TYPE, DEBUG_TYPE, false, false)

FunctionPass *llvm::createX86FixupVectorConstants() {
  return new X86FixupVectorConstantsPass();
}

/// Normally, we only allow poison in vector splats. However, as this is part
/// of the backend, and working with the DAG representation, which currently
/// only natively represents undef values, we need to accept undefs here.
static Constant *getSplatValueAllowUndef(const ConstantVector *C) {
  Constant *Res = nullptr;
  for (Value *Op : C->operands()) {
    Constant *OpC = cast<Constant>(Op);
    // Skip undef elements - any non-undef element defines the splat value.
    if (isa<UndefValue>(OpC))
      continue;
    if (!Res)
      Res = OpC;
    else if (Res != OpC)
      return nullptr;
  }
  return Res;
}
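// e.g. getSplatValueAllowUndef(<i32 42, i32 undef, i32 42, i32 undef>)
// returns the i32 42 constant, while a vector containing two different
// non-undef values returns nullptr.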

// Attempt to extract the full width of bits data from the constant.
static std::optional<APInt> extractConstantBits(const Constant *C) {
  unsigned NumBits = C->getType()->getPrimitiveSizeInBits();

  if (isa<UndefValue>(C))
    return APInt::getZero(NumBits);

  if (auto *CInt = dyn_cast<ConstantInt>(C)) {
    if (isa<VectorType>(CInt->getType()))
      return APInt::getSplat(NumBits, CInt->getValue());
    return CInt->getValue();
  }

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (isa<VectorType>(CFP->getType()))
      return APInt::getSplat(NumBits, CFP->getValue().bitcastToAPInt());
    return CFP->getValue().bitcastToAPInt();
  }

  if (auto *CV = dyn_cast<ConstantVector>(C)) {
    if (auto *CVSplat = getSplatValueAllowUndef(CV)) {
      if (std::optional<APInt> Bits = extractConstantBits(CVSplat)) {
        assert((NumBits % Bits->getBitWidth()) == 0 && "Illegal splat");
        return APInt::getSplat(NumBits, *Bits);
      }
    }

    APInt Bits = APInt::getZero(NumBits);
    for (unsigned I = 0, E = CV->getNumOperands(); I != E; ++I) {
      Constant *Elt = CV->getOperand(I);
      std::optional<APInt> SubBits = extractConstantBits(Elt);
      if (!SubBits)
        return std::nullopt;
      assert(NumBits == (E * SubBits->getBitWidth()) &&
             "Illegal vector element size");
      Bits.insertBits(*SubBits, I * SubBits->getBitWidth());
    }
    return Bits;
  }

  if (auto *CDS = dyn_cast<ConstantDataSequential>(C)) {
    bool IsInteger = CDS->getElementType()->isIntegerTy();
    bool IsFloat = CDS->getElementType()->isHalfTy() ||
                   CDS->getElementType()->isBFloatTy() ||
                   CDS->getElementType()->isFloatTy() ||
                   CDS->getElementType()->isDoubleTy();
    if (IsInteger || IsFloat) {
      APInt Bits = APInt::getZero(NumBits);
      unsigned EltBits = CDS->getElementType()->getPrimitiveSizeInBits();
      for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I) {
        if (IsInteger)
          Bits.insertBits(CDS->getElementAsAPInt(I), I * EltBits);
        else
          Bits.insertBits(CDS->getElementAsAPFloat(I).bitcastToAPInt(),
                          I * EltBits);
      }
      return Bits;
    }
  }

  return std::nullopt;
}
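// e.g. extractConstantBits(<2 x i16> <i16 1, i16 2>) yields the 32-bit APInt
// 0x00020001 - element 0 occupies the lowest bits, matching the little-endian
// memory layout of the constant pool entry.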

static std::optional<APInt> extractConstantBits(const Constant *C,
                                                unsigned NumBits) {
  if (std::optional<APInt> Bits = extractConstantBits(C))
    return Bits->zextOrTrunc(NumBits);
  return std::nullopt;
}

// Attempt to compute the splat width of bits data by normalizing the splat to
// remove undefs.
static std::optional<APInt> getSplatableConstant(const Constant *C,
                                                 unsigned SplatBitWidth) {
  const Type *Ty = C->getType();
  assert((Ty->getPrimitiveSizeInBits() % SplatBitWidth) == 0 &&
         "Illegal splat width");

  if (std::optional<APInt> Bits = extractConstantBits(C))
    if (Bits->isSplat(SplatBitWidth))
      return Bits->trunc(SplatBitWidth);

  // Detect general splats with undefs.
  // TODO: Do we need to handle NumEltsBits > SplatBitWidth splitting?
  if (auto *CV = dyn_cast<ConstantVector>(C)) {
    unsigned NumOps = CV->getNumOperands();
    unsigned NumEltsBits = Ty->getScalarSizeInBits();
    unsigned NumScaleOps = SplatBitWidth / NumEltsBits;
    if ((SplatBitWidth % NumEltsBits) == 0) {
      // Collect the elements and ensure that within the repeated splat
      // sequence they either match or are undef.
      SmallVector<Constant *, 16> Sequence(NumScaleOps, nullptr);
      for (unsigned Idx = 0; Idx != NumOps; ++Idx) {
        if (Constant *Elt = CV->getAggregateElement(Idx)) {
          if (isa<UndefValue>(Elt))
            continue;
          unsigned SplatIdx = Idx % NumScaleOps;
          if (!Sequence[SplatIdx] || Sequence[SplatIdx] == Elt) {
            Sequence[SplatIdx] = Elt;
            continue;
          }
        }
        return std::nullopt;
      }
      // Extract the constant bits of each (non-undef) element of the
      // normalized sequence.
      APInt SplatBits(SplatBitWidth, 0);
      for (unsigned I = 0; I != NumScaleOps; ++I) {
        if (!Sequence[I])
          continue;
        if (std::optional<APInt> Bits = extractConstantBits(Sequence[I])) {
          SplatBits.insertBits(*Bits, I * Bits->getBitWidth());
          continue;
        }
        return std::nullopt;
      }
      return SplatBits;
    }
  }

  return std::nullopt;
}
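// e.g. with SplatBitWidth = 64, both <4 x i32> <i32 1, i32 2, i32 1, i32 2>
// and <4 x i32> <i32 1, i32 undef, i32 undef, i32 2> normalize to the 64-bit
// splat value 0x0000000200000001.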

// Split raw bits into a constant vector of elements of a specific bit width.
// NOTE: We don't always bother converting to scalars if the vector length is 1.
static Constant *rebuildConstant(LLVMContext &Ctx, Type *SclTy,
                                 const APInt &Bits, unsigned NumSclBits) {
  unsigned BitWidth = Bits.getBitWidth();

  if (NumSclBits == 8) {
    SmallVector<uint8_t> RawBits;
    for (unsigned I = 0; I != BitWidth; I += 8)
      RawBits.push_back(Bits.extractBits(8, I).getZExtValue());
    return ConstantDataVector::get(Ctx, RawBits);
  }

  if (NumSclBits == 16) {
    SmallVector<uint16_t> RawBits;
    for (unsigned I = 0; I != BitWidth; I += 16)
      RawBits.push_back(Bits.extractBits(16, I).getZExtValue());
    if (SclTy->is16bitFPTy())
      return ConstantDataVector::getFP(SclTy, RawBits);
    return ConstantDataVector::get(Ctx, RawBits);
  }

  if (NumSclBits == 32) {
    SmallVector<uint32_t> RawBits;
    for (unsigned I = 0; I != BitWidth; I += 32)
      RawBits.push_back(Bits.extractBits(32, I).getZExtValue());
    if (SclTy->isFloatTy())
      return ConstantDataVector::getFP(SclTy, RawBits);
    return ConstantDataVector::get(Ctx, RawBits);
  }

  assert(NumSclBits == 64 && "Unhandled vector element width");

  SmallVector<uint64_t> RawBits;
  for (unsigned I = 0; I != BitWidth; I += 64)
    RawBits.push_back(Bits.extractBits(64, I).getZExtValue());
  if (SclTy->isDoubleTy())
    return ConstantDataVector::getFP(SclTy, RawBits);
  return ConstantDataVector::get(Ctx, RawBits);
}

// Attempt to rebuild a normalized splat vector constant of the requested splat
// width, built up of potentially smaller scalar values.
static Constant *rebuildSplatCst(const Constant *C, unsigned /*NumBits*/,
                                 unsigned /*NumElts*/, unsigned SplatBitWidth) {
  std::optional<APInt> Splat = getSplatableConstant(C, SplatBitWidth);
  if (!Splat)
    return nullptr;

  // Determine scalar size to use for the constant splat vector, clamping as we
  // might have found a splat smaller than the original constant data.
  Type *SclTy = C->getType()->getScalarType();
  unsigned NumSclBits = SclTy->getPrimitiveSizeInBits();
  NumSclBits = std::min(NumSclBits, SplatBitWidth);

  // Fallback to 64-bit scalars for unsupported scalar widths.
  NumSclBits = (NumSclBits == 8 || NumSclBits == 16 || NumSclBits == 32)
                   ? NumSclBits
                   : 64;

  // Extract per-element bits.
  return rebuildConstant(C->getContext(), SclTy, *Splat, NumSclBits);
}
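// e.g. a 256-bit constant splatting the float 1.0f is rebuilt here as a
// single-element 32-bit float constant, which the fixup tables below pair
// with a VBROADCASTSS-class opcode.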

static Constant *rebuildZeroUpperCst(const Constant *C, unsigned NumBits,
                                     unsigned /*NumElts*/,
                                     unsigned ScalarBitWidth) {
  Type *SclTy = C->getType()->getScalarType();
  unsigned NumSclBits = SclTy->getPrimitiveSizeInBits();
  LLVMContext &Ctx = C->getContext();

  if (NumBits > ScalarBitWidth) {
    // Determine if the upper bits are all zero.
    if (std::optional<APInt> Bits = extractConstantBits(C, NumBits)) {
      if (Bits->countLeadingZeros() >= (NumBits - ScalarBitWidth)) {
        // If the original constant was made of smaller elements, try to retain
        // those types.
        if (ScalarBitWidth > NumSclBits && (ScalarBitWidth % NumSclBits) == 0)
          return rebuildConstant(Ctx, SclTy, *Bits, NumSclBits);

        // Fallback to raw integer bits.
        APInt RawBits = Bits->zextOrTrunc(ScalarBitWidth);
        return ConstantInt::get(Ctx, RawBits);
      }
    }
  }

  return nullptr;
}
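// e.g. a 128-bit constant where only the low 32 bits are nonzero can be
// stored as just those 32 bits - a MOVSS-style vzload recreates the zeroed
// upper bits when it loads the scalar.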

static Constant *rebuildExtCst(const Constant *C, bool IsSExt,
                               unsigned NumBits, unsigned NumElts,
                               unsigned SrcEltBitWidth) {
  unsigned DstEltBitWidth = NumBits / NumElts;
  assert((NumBits % NumElts) == 0 && (NumBits % SrcEltBitWidth) == 0 &&
         (DstEltBitWidth % SrcEltBitWidth) == 0 &&
         (DstEltBitWidth > SrcEltBitWidth) && "Illegal extension width");

  if (std::optional<APInt> Bits = extractConstantBits(C, NumBits)) {
    assert((Bits->getBitWidth() / DstEltBitWidth) == NumElts &&
           (Bits->getBitWidth() % DstEltBitWidth) == 0 &&
           "Unexpected constant extension");

    // Ensure every vector element can be represented by the src element width.
    APInt TruncBits = APInt::getZero(NumElts * SrcEltBitWidth);
    for (unsigned I = 0; I != NumElts; ++I) {
      APInt Elt = Bits->extractBits(DstEltBitWidth, I * DstEltBitWidth);
      if ((IsSExt && Elt.getSignificantBits() > SrcEltBitWidth) ||
          (!IsSExt && Elt.getActiveBits() > SrcEltBitWidth))
        return nullptr;
      TruncBits.insertBits(Elt.trunc(SrcEltBitWidth), I * SrcEltBitWidth);
    }

    Type *Ty = C->getType();
    return rebuildConstant(Ty->getContext(), Ty->getScalarType(), TruncBits,
                           SrcEltBitWidth);
  }

  return nullptr;
}
static Constant *rebuildSExtCst(const Constant *C, unsigned NumBits,
                                unsigned NumElts, unsigned SrcEltBitWidth) {
  return rebuildExtCst(C, true, NumBits, NumElts, SrcEltBitWidth);
}
static Constant *rebuildZExtCst(const Constant *C, unsigned NumBits,
                                unsigned NumElts, unsigned SrcEltBitWidth) {
  return rebuildExtCst(C, false, NumBits, NumElts, SrcEltBitWidth);
}
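// e.g. <2 x i64> <i64 100, i64 -2> fits in signed 8-bit elements, so it can
// be stored as the 16-bit constant <2 x i8> <i8 100, i8 -2> and reloaded via
// a VPMOVSXBQ-style sign-extending load.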

bool X86FixupVectorConstantsPass::processInstruction(MachineFunction &MF,
                                                     MachineBasicBlock &MBB,
                                                     MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  MachineConstantPool *CP = MI.getParent()->getParent()->getConstantPool();
  bool HasSSE2 = ST->hasSSE2();
  bool HasSSE41 = ST->hasSSE41();
  bool HasAVX2 = ST->hasAVX2();
  bool HasDQI = ST->hasDQI();
  bool HasBWI = ST->hasBWI();
  bool HasVLX = ST->hasVLX();
  bool MultiDomain = ST->hasNoDomainDelayMov();
  bool OptSize = MF.getFunction().hasOptSize();

  struct FixupEntry {
    int Op;
    int NumCstElts;
    int MemBitWidth;
    std::function<Constant *(const Constant *, unsigned, unsigned, unsigned)>
        RebuildConstant;
  };

  auto NewOpcPreferable = [&](const FixupEntry &Fixup,
                              unsigned RegBitWidth) -> bool {
    if (SM->hasInstrSchedModel()) {
      unsigned NewOpc = Fixup.Op;
      auto *OldDesc = SM->getSchedClassDesc(TII->get(Opc).getSchedClass());
      auto *NewDesc = SM->getSchedClassDesc(TII->get(NewOpc).getSchedClass());
      unsigned BitsSaved = RegBitWidth - (Fixup.NumCstElts * Fixup.MemBitWidth);

      // Compare tput/lat - avoid any regressions, but allow extra cycle of
      // latency in exchange for each 128-bit (or less) constant pool reduction
      // (this is a very simple cost:benefit estimate - there will probably be
      // better ways to calculate this).
      double OldTput = MCSchedModel::getReciprocalThroughput(*ST, *OldDesc);
      double NewTput = MCSchedModel::getReciprocalThroughput(*ST, *NewDesc);
      if (OldTput != NewTput)
        return NewTput < OldTput;

      int LatTol = (BitsSaved + 127) / 128;
      int OldLat = MCSchedModel::computeInstrLatency(*ST, *OldDesc);
      int NewLat = MCSchedModel::computeInstrLatency(*ST, *NewDesc);
      if (OldLat != NewLat)
        return NewLat < (OldLat + LatTol);
    }

    // We don't have enough scheduling information to compare the opcodes, so
    // just assume the smallest constant is preferable.
    return true;
  };
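  // Latency tolerance example: replacing a 512-bit load with a 64-bit
  // broadcast saves 448 constant pool bits, giving
  // LatTol = (448 + 127) / 128 = 4, so the broadcast is accepted while
  // NewLat < OldLat + 4, i.e. at most 3 extra cycles of latency.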

  auto FixupConstant = [&](ArrayRef<FixupEntry> Fixups, unsigned RegBitWidth,
                           unsigned OperandNo) {
#ifdef EXPENSIVE_CHECKS
    assert(llvm::is_sorted(Fixups,
                           [](const FixupEntry &A, const FixupEntry &B) {
                             return (A.NumCstElts * A.MemBitWidth) <
                                    (B.NumCstElts * B.MemBitWidth);
                           }) &&
           "Constant fixup table not sorted in ascending constant size");
#endif
    assert(MI.getNumOperands() >= (OperandNo + X86::AddrNumOperands) &&
           "Unexpected number of operands!");
    if (auto *C = X86::getConstantFromPool(MI, OperandNo)) {
      unsigned CstBitWidth = C->getType()->getPrimitiveSizeInBits();
      RegBitWidth = RegBitWidth ? RegBitWidth : CstBitWidth;
      for (const FixupEntry &Fixup : Fixups) {
        // Always uses the smallest possible constant load with opt/minsize,
        // otherwise use the smallest instruction that doesn't affect
        // performance.
        // TODO: If constant has been hoisted from loop, use smallest constant.
        if (Fixup.Op && (OptSize || NewOpcPreferable(Fixup, RegBitWidth))) {
          // Construct a suitable constant and adjust the MI to use the new
          // constant pool entry.
          if (Constant *NewCst = Fixup.RebuildConstant(
                  C, RegBitWidth, Fixup.NumCstElts, Fixup.MemBitWidth)) {
            unsigned NewCPI =
                CP->getConstantPoolIndex(NewCst, Align(Fixup.MemBitWidth / 8));
            MI.setDesc(TII->get(Fixup.Op));
            MI.getOperand(OperandNo + X86::AddrDisp).setIndex(NewCPI);
            return true;
          }
        }
      }
    }
    return false;
  };
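  // The fixup tables below are sorted by ascending constant size, so the
  // first entry with a non-zero opcode that passes the profitability check
  // yields the smallest acceptable constant pool entry.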

  // Attempt to detect a suitable vzload/broadcast/vextload from increasing
  // constant bitwidths.
  // Prefer vzload vs broadcast for same bitwidth to avoid domain switching.
  // The MultiDomain (integer extload/broadcast) entries in the FP tables are
  // only enabled on targets without a penalty for moving between domains.
  switch (Opc) {

    /* FP Loads */
  case X86::MOVAPDrm:
  case X86::MOVAPSrm:
  case X86::MOVUPDrm:
  case X86::MOVUPSrm: {
    // TODO: SSE3 MOVDDUP Handling
    FixupEntry Fixups[] = {
        {X86::MOVSSrm, 1, 32, rebuildZeroUpperCst},
        {HasSSE2 ? X86::MOVSDrm : 0, 1, 64, rebuildZeroUpperCst}};
    return FixupConstant(Fixups, 128, 1);
  }

  case X86::VMOVAPDrm:
  case X86::VMOVAPSrm:
  case X86::VMOVUPDrm:
  case X86::VMOVUPSrm: {
    FixupEntry Fixups[] = {
        {MultiDomain ? X86::VPMOVSXBQrm : 0, 2, 8, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXBQrm : 0, 2, 8, rebuildZExtCst},
        {X86::VMOVSSrm, 1, 32, rebuildZeroUpperCst},
        {X86::VBROADCASTSSrm, 1, 32, rebuildSplatCst},
        {MultiDomain ? X86::VPMOVSXBDrm : 0, 4, 8, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXBDrm : 0, 4, 8, rebuildZExtCst},
        {MultiDomain ? X86::VPMOVSXWQrm : 0, 2, 16, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXWQrm : 0, 2, 16, rebuildZExtCst},
        {X86::VMOVSDrm, 1, 64, rebuildZeroUpperCst},
        {X86::VMOVDDUPrm, 1, 64, rebuildSplatCst},
        {MultiDomain ? X86::VPMOVSXWDrm : 0, 4, 16, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXWDrm : 0, 4, 16, rebuildZExtCst},
        {MultiDomain ? X86::VPMOVSXDQrm : 0, 2, 32, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXDQrm : 0, 2, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 128, 1);
  }

  case X86::VMOVAPDYrm:
  case X86::VMOVAPSYrm:
  case X86::VMOVUPDYrm:
  case X86::VMOVUPSYrm: {
    FixupEntry Fixups[] = {
        {X86::VBROADCASTSSYrm, 1, 32, rebuildSplatCst},
        {HasAVX2 && MultiDomain ? X86::VPMOVSXBQYrm : 0, 4, 8, rebuildSExtCst},
        {HasAVX2 && MultiDomain ? X86::VPMOVZXBQYrm : 0, 4, 8, rebuildZExtCst},
        {X86::VBROADCASTSDYrm, 1, 64, rebuildSplatCst},
        {HasAVX2 && MultiDomain ? X86::VPMOVSXBDYrm : 0, 8, 8, rebuildSExtCst},
        {HasAVX2 && MultiDomain ? X86::VPMOVZXBDYrm : 0, 8, 8, rebuildZExtCst},
        {HasAVX2 && MultiDomain ? X86::VPMOVSXWQYrm : 0, 4, 16, rebuildSExtCst},
        {HasAVX2 && MultiDomain ? X86::VPMOVZXWQYrm : 0, 4, 16, rebuildZExtCst},
        {X86::VBROADCASTF128rm, 1, 128, rebuildSplatCst},
        {HasAVX2 && MultiDomain ? X86::VPMOVSXWDYrm : 0, 8, 16, rebuildSExtCst},
        {HasAVX2 && MultiDomain ? X86::VPMOVZXWDYrm : 0, 8, 16, rebuildZExtCst},
        {HasAVX2 && MultiDomain ? X86::VPMOVSXDQYrm : 0, 4, 32, rebuildSExtCst},
        {HasAVX2 && MultiDomain ? X86::VPMOVZXDQYrm : 0, 4, 32,
         rebuildZExtCst}};
    return FixupConstant(Fixups, 256, 1);
  }

  case X86::VMOVAPDZ128rm:
  case X86::VMOVAPSZ128rm:
  case X86::VMOVUPDZ128rm:
  case X86::VMOVUPSZ128rm: {
    FixupEntry Fixups[] = {
        {MultiDomain ? X86::VPMOVSXBQZ128rm : 0, 2, 8, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXBQZ128rm : 0, 2, 8, rebuildZExtCst},
        {X86::VMOVSSZrm, 1, 32, rebuildZeroUpperCst},
        {X86::VBROADCASTSSZ128rm, 1, 32, rebuildSplatCst},
        {MultiDomain ? X86::VPMOVSXBDZ128rm : 0, 4, 8, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXBDZ128rm : 0, 4, 8, rebuildZExtCst},
        {MultiDomain ? X86::VPMOVSXWQZ128rm : 0, 2, 16, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXWQZ128rm : 0, 2, 16, rebuildZExtCst},
        {X86::VMOVSDZrm, 1, 64, rebuildZeroUpperCst},
        {X86::VMOVDDUPZ128rm, 1, 64, rebuildSplatCst},
        {MultiDomain ? X86::VPMOVSXWDZ128rm : 0, 4, 16, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXWDZ128rm : 0, 4, 16, rebuildZExtCst},
        {MultiDomain ? X86::VPMOVSXDQZ128rm : 0, 2, 32, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXDQZ128rm : 0, 2, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 128, 1);
  }

  case X86::VMOVAPDZ256rm:
  case X86::VMOVAPSZ256rm:
  case X86::VMOVUPDZ256rm:
  case X86::VMOVUPSZ256rm: {
    FixupEntry Fixups[] = {
        {X86::VBROADCASTSSZ256rm, 1, 32, rebuildSplatCst},
        {MultiDomain ? X86::VPMOVSXBQZ256rm : 0, 4, 8, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXBQZ256rm : 0, 4, 8, rebuildZExtCst},
        {X86::VBROADCASTSDZ256rm, 1, 64, rebuildSplatCst},
        {MultiDomain ? X86::VPMOVSXBDZ256rm : 0, 8, 8, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXBDZ256rm : 0, 8, 8, rebuildZExtCst},
        {MultiDomain ? X86::VPMOVSXWQZ256rm : 0, 4, 16, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXWQZ256rm : 0, 4, 16, rebuildZExtCst},
        {X86::VBROADCASTF32X4Z256rm, 1, 128, rebuildSplatCst},
        {MultiDomain ? X86::VPMOVSXWDZ256rm : 0, 8, 16, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXWDZ256rm : 0, 8, 16, rebuildZExtCst},
        {MultiDomain ? X86::VPMOVSXDQZ256rm : 0, 4, 32, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXDQZ256rm : 0, 4, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 256, 1);
  }

  case X86::VMOVAPDZrm:
  case X86::VMOVAPSZrm:
  case X86::VMOVUPDZrm:
  case X86::VMOVUPSZrm: {
    FixupEntry Fixups[] = {
        {X86::VBROADCASTSSZrm, 1, 32, rebuildSplatCst},
        {X86::VBROADCASTSDZrm, 1, 64, rebuildSplatCst},
        {MultiDomain ? X86::VPMOVSXBQZrm : 0, 8, 8, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXBQZrm : 0, 8, 8, rebuildZExtCst},
        {X86::VBROADCASTF32X4Zrm, 1, 128, rebuildSplatCst},
        {MultiDomain ? X86::VPMOVSXBDZrm : 0, 16, 8, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXBDZrm : 0, 16, 8, rebuildZExtCst},
        {MultiDomain ? X86::VPMOVSXWQZrm : 0, 8, 16, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXWQZrm : 0, 8, 16, rebuildZExtCst},
        {X86::VBROADCASTF64X4Zrm, 1, 256, rebuildSplatCst},
        {MultiDomain ? X86::VPMOVSXWDZrm : 0, 16, 16, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXWDZrm : 0, 16, 16, rebuildZExtCst},
        {MultiDomain ? X86::VPMOVSXDQZrm : 0, 8, 32, rebuildSExtCst},
        {MultiDomain ? X86::VPMOVZXDQZrm : 0, 8, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 512, 1);
  }

    /* Integer Loads */
  case X86::MOVDQArm:
  case X86::MOVDQUrm: {
    FixupEntry Fixups[] = {
        {HasSSE41 ? X86::PMOVSXBQrm : 0, 2, 8, rebuildSExtCst},
        {HasSSE41 ? X86::PMOVZXBQrm : 0, 2, 8, rebuildZExtCst},
        {X86::MOVDI2PDIrm, 1, 32, rebuildZeroUpperCst},
        {HasSSE41 ? X86::PMOVSXBDrm : 0, 4, 8, rebuildSExtCst},
        {HasSSE41 ? X86::PMOVZXBDrm : 0, 4, 8, rebuildZExtCst},
        {HasSSE41 ? X86::PMOVSXWQrm : 0, 2, 16, rebuildSExtCst},
        {HasSSE41 ? X86::PMOVZXWQrm : 0, 2, 16, rebuildZExtCst},
        {X86::MOVQI2PQIrm, 1, 64, rebuildZeroUpperCst},
        {HasSSE41 ? X86::PMOVSXBWrm : 0, 8, 8, rebuildSExtCst},
        {HasSSE41 ? X86::PMOVZXBWrm : 0, 8, 8, rebuildZExtCst},
        {HasSSE41 ? X86::PMOVSXWDrm : 0, 4, 16, rebuildSExtCst},
        {HasSSE41 ? X86::PMOVZXWDrm : 0, 4, 16, rebuildZExtCst},
        {HasSSE41 ? X86::PMOVSXDQrm : 0, 2, 32, rebuildSExtCst},
        {HasSSE41 ? X86::PMOVZXDQrm : 0, 2, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 128, 1);
  }

  case X86::VMOVDQArm:
  case X86::VMOVDQUrm: {
    FixupEntry Fixups[] = {
        {HasAVX2 ? X86::VPBROADCASTBrm : 0, 1, 8, rebuildSplatCst},
        {HasAVX2 ? X86::VPBROADCASTWrm : 0, 1, 16, rebuildSplatCst},
        {X86::VPMOVSXBQrm, 2, 8, rebuildSExtCst},
        {X86::VPMOVZXBQrm, 2, 8, rebuildZExtCst},
        {X86::VMOVDI2PDIrm, 1, 32, rebuildZeroUpperCst},
        {HasAVX2 ? X86::VPBROADCASTDrm : X86::VBROADCASTSSrm, 1, 32,
         rebuildSplatCst},
        {X86::VPMOVSXBDrm, 4, 8, rebuildSExtCst},
        {X86::VPMOVZXBDrm, 4, 8, rebuildZExtCst},
        {X86::VPMOVSXWQrm, 2, 16, rebuildSExtCst},
        {X86::VPMOVZXWQrm, 2, 16, rebuildZExtCst},
        {X86::VMOVQI2PQIrm, 1, 64, rebuildZeroUpperCst},
        {HasAVX2 ? X86::VPBROADCASTQrm : X86::VMOVDDUPrm, 1, 64,
         rebuildSplatCst},
        {X86::VPMOVSXBWrm, 8, 8, rebuildSExtCst},
        {X86::VPMOVZXBWrm, 8, 8, rebuildZExtCst},
        {X86::VPMOVSXWDrm, 4, 16, rebuildSExtCst},
        {X86::VPMOVZXWDrm, 4, 16, rebuildZExtCst},
        {X86::VPMOVSXDQrm, 2, 32, rebuildSExtCst},
        {X86::VPMOVZXDQrm, 2, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 128, 1);
  }

  case X86::VMOVDQAYrm:
  case X86::VMOVDQUYrm: {
    FixupEntry Fixups[] = {
        {HasAVX2 ? X86::VPBROADCASTBYrm : 0, 1, 8, rebuildSplatCst},
        {HasAVX2 ? X86::VPBROADCASTWYrm : 0, 1, 16, rebuildSplatCst},
        {HasAVX2 ? X86::VPBROADCASTDYrm : X86::VBROADCASTSSYrm, 1, 32,
         rebuildSplatCst},
        {HasAVX2 ? X86::VPMOVSXBQYrm : 0, 4, 8, rebuildSExtCst},
        {HasAVX2 ? X86::VPMOVZXBQYrm : 0, 4, 8, rebuildZExtCst},
        {HasAVX2 ? X86::VPBROADCASTQYrm : X86::VBROADCASTSDYrm, 1, 64,
         rebuildSplatCst},
        {HasAVX2 ? X86::VPMOVSXBDYrm : 0, 8, 8, rebuildSExtCst},
        {HasAVX2 ? X86::VPMOVZXBDYrm : 0, 8, 8, rebuildZExtCst},
        {HasAVX2 ? X86::VPMOVSXWQYrm : 0, 4, 16, rebuildSExtCst},
        {HasAVX2 ? X86::VPMOVZXWQYrm : 0, 4, 16, rebuildZExtCst},
        {HasAVX2 ? X86::VBROADCASTI128rm : X86::VBROADCASTF128rm, 1, 128,
         rebuildSplatCst},
        {HasAVX2 ? X86::VPMOVSXBWYrm : 0, 16, 8, rebuildSExtCst},
        {HasAVX2 ? X86::VPMOVZXBWYrm : 0, 16, 8, rebuildZExtCst},
        {HasAVX2 ? X86::VPMOVSXWDYrm : 0, 8, 16, rebuildSExtCst},
        {HasAVX2 ? X86::VPMOVZXWDYrm : 0, 8, 16, rebuildZExtCst},
        {HasAVX2 ? X86::VPMOVSXDQYrm : 0, 4, 32, rebuildSExtCst},
        {HasAVX2 ? X86::VPMOVZXDQYrm : 0, 4, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 256, 1);
  }

  case X86::VMOVDQA32Z128rm:
  case X86::VMOVDQA64Z128rm:
  case X86::VMOVDQU32Z128rm:
  case X86::VMOVDQU64Z128rm: {
    FixupEntry Fixups[] = {
        {HasBWI ? X86::VPBROADCASTBZ128rm : 0, 1, 8, rebuildSplatCst},
        {HasBWI ? X86::VPBROADCASTWZ128rm : 0, 1, 16, rebuildSplatCst},
        {X86::VPMOVSXBQZ128rm, 2, 8, rebuildSExtCst},
        {X86::VPMOVZXBQZ128rm, 2, 8, rebuildZExtCst},
        {X86::VMOVDI2PDIZrm, 1, 32, rebuildZeroUpperCst},
        {X86::VPBROADCASTDZ128rm, 1, 32, rebuildSplatCst},
        {X86::VPMOVSXBDZ128rm, 4, 8, rebuildSExtCst},
        {X86::VPMOVZXBDZ128rm, 4, 8, rebuildZExtCst},
        {X86::VPMOVSXWQZ128rm, 2, 16, rebuildSExtCst},
        {X86::VPMOVZXWQZ128rm, 2, 16, rebuildZExtCst},
        {X86::VMOVQI2PQIZrm, 1, 64, rebuildZeroUpperCst},
        {X86::VPBROADCASTQZ128rm, 1, 64, rebuildSplatCst},
        {HasBWI ? X86::VPMOVSXBWZ128rm : 0, 8, 8, rebuildSExtCst},
        {HasBWI ? X86::VPMOVZXBWZ128rm : 0, 8, 8, rebuildZExtCst},
        {X86::VPMOVSXWDZ128rm, 4, 16, rebuildSExtCst},
        {X86::VPMOVZXWDZ128rm, 4, 16, rebuildZExtCst},
        {X86::VPMOVSXDQZ128rm, 2, 32, rebuildSExtCst},
        {X86::VPMOVZXDQZ128rm, 2, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 128, 1);
  }

  case X86::VMOVDQA32Z256rm:
  case X86::VMOVDQA64Z256rm:
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQU64Z256rm: {
    FixupEntry Fixups[] = {
        {HasBWI ? X86::VPBROADCASTBZ256rm : 0, 1, 8, rebuildSplatCst},
        {HasBWI ? X86::VPBROADCASTWZ256rm : 0, 1, 16, rebuildSplatCst},
        {X86::VPBROADCASTDZ256rm, 1, 32, rebuildSplatCst},
        {X86::VPMOVSXBQZ256rm, 4, 8, rebuildSExtCst},
        {X86::VPMOVZXBQZ256rm, 4, 8, rebuildZExtCst},
        {X86::VPBROADCASTQZ256rm, 1, 64, rebuildSplatCst},
        {X86::VPMOVSXBDZ256rm, 8, 8, rebuildSExtCst},
        {X86::VPMOVZXBDZ256rm, 8, 8, rebuildZExtCst},
        {X86::VPMOVSXWQZ256rm, 4, 16, rebuildSExtCst},
        {X86::VPMOVZXWQZ256rm, 4, 16, rebuildZExtCst},
        {X86::VBROADCASTI32X4Z256rm, 1, 128, rebuildSplatCst},
        {HasBWI ? X86::VPMOVSXBWZ256rm : 0, 16, 8, rebuildSExtCst},
        {HasBWI ? X86::VPMOVZXBWZ256rm : 0, 16, 8, rebuildZExtCst},
        {X86::VPMOVSXWDZ256rm, 8, 16, rebuildSExtCst},
        {X86::VPMOVZXWDZ256rm, 8, 16, rebuildZExtCst},
        {X86::VPMOVSXDQZ256rm, 4, 32, rebuildSExtCst},
        {X86::VPMOVZXDQZ256rm, 4, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 256, 1);
  }

  case X86::VMOVDQA32Zrm:
  case X86::VMOVDQA64Zrm:
  case X86::VMOVDQU32Zrm:
  case X86::VMOVDQU64Zrm: {
    FixupEntry Fixups[] = {
        {HasBWI ? X86::VPBROADCASTBZrm : 0, 1, 8, rebuildSplatCst},
        {HasBWI ? X86::VPBROADCASTWZrm : 0, 1, 16, rebuildSplatCst},
        {X86::VPBROADCASTDZrm, 1, 32, rebuildSplatCst},
        {X86::VPBROADCASTQZrm, 1, 64, rebuildSplatCst},
        {X86::VPMOVSXBQZrm, 8, 8, rebuildSExtCst},
        {X86::VPMOVZXBQZrm, 8, 8, rebuildZExtCst},
        {X86::VBROADCASTI32X4Zrm, 1, 128, rebuildSplatCst},
        {X86::VPMOVSXBDZrm, 16, 8, rebuildSExtCst},
        {X86::VPMOVZXBDZrm, 16, 8, rebuildZExtCst},
        {X86::VPMOVSXWQZrm, 8, 16, rebuildSExtCst},
        {X86::VPMOVZXWQZrm, 8, 16, rebuildZExtCst},
        {X86::VBROADCASTI64X4Zrm, 1, 256, rebuildSplatCst},
        {HasBWI ? X86::VPMOVSXBWZrm : 0, 32, 8, rebuildSExtCst},
        {HasBWI ? X86::VPMOVZXBWZrm : 0, 32, 8, rebuildZExtCst},
        {X86::VPMOVSXWDZrm, 16, 16, rebuildSExtCst},
        {X86::VPMOVZXWDZrm, 16, 16, rebuildZExtCst},
        {X86::VPMOVSXDQZrm, 8, 32, rebuildSExtCst},
        {X86::VPMOVZXDQZrm, 8, 32, rebuildZExtCst}};
    return FixupConstant(Fixups, 512, 1);
  }
  }

  auto ConvertToBroadcast = [&](unsigned OpSrc, int BW) {
    if (OpSrc) {
      if (const X86FoldTableEntry *Mem2Bcst =
              llvm::lookupBroadcastFoldTableBySize(OpSrc, BW)) {
        unsigned OpBcst = Mem2Bcst->DstOp;
        unsigned OpNoBcst = Mem2Bcst->Flags & TB_INDEX_MASK;
        FixupEntry Fixups[] = {{(int)OpBcst, 1, BW, rebuildSplatCst}};
        // Don't require a specific RegBitWidth - the fold table entry already
        // matches the instruction's width.
        return FixupConstant(Fixups, 0, OpNoBcst);
      }
    }
    return false;
  };
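  // e.g. for a 64-bit splat constant this can rewrite
  //   vpandq constant(%rip), %zmm1, %zmm0        # 64 byte constant pool entry
  // into the EVEX embedded-broadcast form
  //   vpandq constant(%rip){1to8}, %zmm1, %zmm0  # 8 byte constant pool entry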

  // Attempt to find an AVX512 mapping from a full width memory-fold
  // instruction to a broadcast-fold instruction variant.
  if ((MI.getDesc().TSFlags & X86II::EncodingMask) == X86II::EVEX)
    return ConvertToBroadcast(Opc, 32) || ConvertToBroadcast(Opc, 64);

  // Reverse the X86InstrInfo::setExecutionDomainCustom EVEX->VEX logic
  // conversion to see if we can convert to a broadcasted (integer) logic op.
  if (HasVLX && !HasDQI) {

    unsigned OpSrc32 = 0, OpSrc64 = 0;
    switch (Opc) {
    case X86::VANDPDrm:
    case X86::VANDPSrm:
    case X86::VPANDrm:
      OpSrc32 = X86::VPANDDZ128rm;
      OpSrc64 = X86::VPANDQZ128rm;
      break;
    case X86::VANDPDYrm:
    case X86::VANDPSYrm:
    case X86::VPANDYrm:
      OpSrc32 = X86::VPANDDZ256rm;
      OpSrc64 = X86::VPANDQZ256rm;
      break;
    case X86::VANDNPDrm:
    case X86::VANDNPSrm:
    case X86::VPANDNrm:
      OpSrc32 = X86::VPANDNDZ128rm;
      OpSrc64 = X86::VPANDNQZ128rm;
      break;
    case X86::VANDNPDYrm:
    case X86::VANDNPSYrm:
    case X86::VPANDNYrm:
      OpSrc32 = X86::VPANDNDZ256rm;
      OpSrc64 = X86::VPANDNQZ256rm;
      break;
    case X86::VORPDrm:
    case X86::VORPSrm:
    case X86::VPORrm:
      OpSrc32 = X86::VPORDZ128rm;
      OpSrc64 = X86::VPORQZ128rm;
      break;
    case X86::VORPDYrm:
    case X86::VORPSYrm:
    case X86::VPORYrm:
      OpSrc32 = X86::VPORDZ256rm;
      OpSrc64 = X86::VPORQZ256rm;
      break;
    case X86::VXORPDrm:
    case X86::VXORPSrm:
    case X86::VPXORrm:
      OpSrc32 = X86::VPXORDZ128rm;
      OpSrc64 = X86::VPXORQZ128rm;
      break;
    case X86::VXORPDYrm:
    case X86::VXORPSYrm:
    case X86::VPXORYrm:
      OpSrc32 = X86::VPXORDZ256rm;
      OpSrc64 = X86::VPXORQZ256rm;
      break;
    }
    if (OpSrc32 || OpSrc64)
      return ConvertToBroadcast(OpSrc32, 32) || ConvertToBroadcast(OpSrc64, 64);
  }

  return false;
}

bool X86FixupVectorConstantsPass::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "Start X86FixupVectorConstants\n";);
  bool Changed = false;
  ST = &MF.getSubtarget<X86Subtarget>();
  TII = ST->getInstrInfo();
  SM = &ST->getSchedModel();

  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (processInstruction(MF, MBB, MI)) {
        ++NumInstChanges;
        Changed = true;
      }
    }
  }
  LLVM_DEBUG(dbgs() << "End X86FixupVectorConstants\n";);
  return Changed;
}
