LLVM: lib/Transforms/InstCombine/InstCombineVectorOps.cpp Source File

1//===- InstCombineVectorOps.cpp -------------------------------------===//

2//

3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.

4// See https://llvm.org/LICENSE.txt for license information.

5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

6//

7//===----------------------------------------------------------------------===//

8//

9// This file implements instcombine for ExtractElement, InsertElement and

10// ShuffleVector.

11//

12//===----------------------------------------------------------------------===//

13

39#include

40#include

41#include

42#include

43

44#define DEBUG_TYPE "instcombine"

45

46using namespace llvm;

47using namespace PatternMatch;

48

49STATISTIC(NumAggregateReconstructionsSimplified,

50 "Number of aggregate reconstructions turned into reuse of the "

51 "original aggregate");

52

53

54

55

56

57

59 ConstantInt *CEI = dyn_cast<ConstantInt>(EI);

60

61

62 if (auto *C = dyn_cast<Constant>(V))

63 return CEI || C->getSplatValue();

64

65 if (CEI && match(V, m_Intrinsic<Intrinsic::stepvector>())) {

66 ElementCount EC = cast<VectorType>(V->getType())->getElementCount();

67

68

69 return CEI->getValue().ult(EC.getKnownMinValue());

70 }

71

72

73

74

76 return CEI;

77

79 return true;

80

82 return true;

83

87 return true;

88

92 return true;

93

94 return false;

95}

96

97

98

99

103

104

105

106

108 for (auto *U : PN->users()) {

112 else

113 return nullptr;

114 } else if (!PHIUser) {

115 PHIUser = cast<Instruction>(U);

116 } else {

117 return nullptr;

118 }

119 }

120

121 if (!PHIUser)

122 return nullptr;

123

124

125

126

128 !(isa<BinaryOperator>(PHIUser)) ||

130 return nullptr;

131

132

133

136

141

142 if (PHIInVal == PHIUser) {

143

144

145

147 unsigned opId = (B0->getOperand(0) == PN) ? 1 : 0;

155 scalarPHI->addIncoming(newPHIUser, inBB);

156 } else {

157

159

160 Instruction *pos = dyn_cast<Instruction>(PHIInVal);

162 if (pos && !isa<PHINode>(pos)) {

164 } else {

166 }

167

169

171 }

172 }

173

174 for (auto *E : Extracts) {

176

178 }

179

180 return &EI;

181}

182

188 return nullptr;

189

191 cast<VectorType>(Ext.getVectorOperandType())->getElementCount();

192 Type *DestTy = Ext.getType();

195

196

197

198 if (X->getType()->isIntegerTy()) {

199 assert(isa<FixedVectorType>(Ext.getVectorOperand()->getType()) &&

200 "Expected fixed vector type for bitcast from scalar integer");

201

202

203

204

205 if (IsBigEndian)

207 unsigned ShiftAmountC = ExtIndexC * DestWidth;

208 if ((!ShiftAmountC ||

209 isDesirableIntType(X->getType()->getPrimitiveSizeInBits())) &&

210 Ext.getVectorOperand()->hasOneUse()) {

211 if (ShiftAmountC)

217 }

219 }

220 }

221

222 if (!X->getType()->isVectorTy())

223 return nullptr;

224

225

226

227

228 auto *SrcTy = cast<VectorType>(X->getType());

229 ElementCount NumSrcElts = SrcTy->getElementCount();

230 if (NumSrcElts == NumElts)

233

235 "Src and Dst must be the same sort of vector type");

236

237

238

245 return nullptr;

246

247

248

249

250

251 unsigned NarrowingRatio =

253

254 if (ExtIndexC / NarrowingRatio != InsIndexC) {

255

256

257

258

259

260 if (X->hasOneUse() && Ext.getVectorOperand()->hasOneUse()) {

263 }

264 return nullptr;

265 }

266

267

268

269

270

271

272

273

274

275

276

277

278 unsigned Chunk = ExtIndexC % NarrowingRatio;

279 if (IsBigEndian)

280 Chunk = NarrowingRatio - 1 - Chunk;

281

282

283

284

285 bool NeedSrcBitcast = SrcTy->getScalarType()->isFloatingPointTy();

287 if (NeedSrcBitcast && NeedDestBitcast)

288 return nullptr;

289

290 unsigned SrcWidth = SrcTy->getScalarSizeInBits();

291 unsigned ShAmt = Chunk * DestWidth;

292

293

294

295

296 if (X->hasOneUse() || Ext.getVectorOperand()->hasOneUse())

297 if (NeedSrcBitcast || NeedDestBitcast)

298 return nullptr;

299

300 if (NeedSrcBitcast) {

303 }

304

305 if (ShAmt) {

306

307 if (Ext.getVectorOperand()->hasOneUse())

308 return nullptr;

310 }

311

312 if (NeedDestBitcast) {

315 }

316 return new TruncInst(Scalar, DestTy);

317 }

318

319 return nullptr;

320}

321

322

324 unsigned VWidth = cast<FixedVectorType>(V->getType())->getNumElements();

325

326

328

329 switch (UserInstr->getOpcode()) {

330 case Instruction::ExtractElement: {

334 if (EEIIndexC && EEIIndexC->getValue().ult(VWidth)) {

336 }

337 break;

338 }

339 case Instruction::ShuffleVector: {

341 unsigned MaskNumElts =

342 cast<FixedVectorType>(UserInstr->getType())->getNumElements();

343

344 UsedElts = APInt(VWidth, 0);

345 for (unsigned i = 0; i < MaskNumElts; i++) {

347 if (MaskVal == -1u || MaskVal >= 2 * VWidth)

348 continue;

349 if (Shuffle->getOperand(0) == V && (MaskVal < VWidth))

350 UsedElts.setBit(MaskVal);

352 ((MaskVal >= VWidth) && (MaskVal < 2 * VWidth)))

353 UsedElts.setBit(MaskVal - VWidth);

354 }

355 break;

356 }

357 default:

358 break;

359 }

360 return UsedElts;

361}

362

363

364

365

366

368 unsigned VWidth = cast<FixedVectorType>(V->getType())->getNumElements();

369

370 APInt UnionUsedElts(VWidth, 0);

371 for (const Use &U : V->uses()) {

372 if (Instruction *I = dyn_cast<Instruction>(U.getUser())) {

374 } else {

376 break;

377 }

378

380 break;

381 }

382

383 return UnionUsedElts;

384}

385

386

387

388

389

391 const unsigned IndexBW = IndexC->getBitWidth();

393 return nullptr;

394 return ConstantInt::get(IndexC->getContext(),

396}

397

404

405

406

407

408

409

410

411

412

414 if (SI->getCondition()->getType()->isIntegerTy() &&

417 return R;

418

419

420

421 auto *IndexC = dyn_cast<ConstantInt>(Index);

422 bool HasKnownValidIndex = false;

423 if (IndexC) {

424

427

429 unsigned NumElts = EC.getKnownMinValue();

430 HasKnownValidIndex = IndexC->getValue().ult(NumElts);

431

432 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(SrcVec)) {

434

435

436 if (IID == Intrinsic::stepvector && IndexC->getValue().ult(NumElts)) {

440

441

442 if (IndexC->getValue().getActiveBits() <= BitWidth)

443 Idx = ConstantInt::get(Ty, IndexC->getValue().zextOrTrunc(BitWidth));

444 else

447 }

448 }

449

450

451

452 if (!EC.isScalable() && IndexC->getValue().uge(NumElts))

453 return nullptr;

454

456 return I;

457

458

459

460 if (auto *Phi = dyn_cast<PHINode>(SrcVec))

461 if (Instruction *ScalarPHI = scalarizePHI(EI, Phi))

462 return ScalarPHI;

463 }

464

465

466

469

473 }

474

475

476

479 (HasKnownValidIndex ||

481

486 }

487

492

495 CmpInst *SrcCmpInst = cast<CmpInst>(SrcVec);

497 SrcCmpInst);

498 }

499

500 if (auto *I = dyn_cast<Instruction>(SrcVec)) {

501 if (auto *IE = dyn_cast<InsertElementInst>(I)) {

502

503

504

505 if (isa(IE->getOperand(2)) && IndexC)

507 } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {

508 auto *VecType = cast<VectorType>(GEP->getType());

509 ElementCount EC = VecType->getElementCount();

510 uint64_t IdxVal = IndexC ? IndexC->getZExtValue() : 0;

511 if (IndexC && IdxVal < EC.getKnownMinValue() && GEP->hasOneUse()) {

512

513

514

515

516

517

518

519

520 unsigned VectorOps =

522 return isa<VectorType>(V->getType());

523 });

524 if (VectorOps == 1) {

525 Value *NewPtr = GEP->getPointerOperand();

526 if (isa(NewPtr->getType()))

528

530 for (unsigned I = 1; I != GEP->getNumOperands(); ++I) {

532 if (isa(Op->getType()))

534 else

536 }

537

539 GEP->getSourceElementType(), NewPtr, NewOps);

541 return NewGEP;

542 }

543 }

544 } else if (auto *SVI = dyn_cast<ShuffleVectorInst>(I)) {

545

546

547

548 if (isa<FixedVectorType>(SVI->getType()) && isa<ConstantInt>(Index)) {

549 int SrcIdx =

550 SVI->getMaskValue(cast<ConstantInt>(Index)->getZExtValue());

552 unsigned LHSWidth = cast<FixedVectorType>(SVI->getOperand(0)->getType())

553 ->getNumElements();

554

555 if (SrcIdx < 0)

557 if (SrcIdx < (int)LHSWidth)

558 Src = SVI->getOperand(0);

559 else {

560 SrcIdx -= LHSWidth;

561 Src = SVI->getOperand(1);

562 }

565 Src, ConstantInt::get(Int64Ty, SrcIdx, false));

566 }

567 } else if (auto *CI = dyn_cast<CastInst>(I)) {

568

569

570

571 if (CI->hasOneUse() && (CI->getOpcode() != Instruction::BitCast)) {

574 }

575 }

576 }

577

578

579

580

581 if (IndexC) {

583 unsigned NumElts = EC.getKnownMinValue();

584

585

586

587 if (!EC.isScalable() && NumElts != 1) {

588

589

591 APInt PoisonElts(NumElts, 0);

592 APInt DemandedElts(NumElts, 0);

593 DemandedElts.setBit(IndexC->getZExtValue());

597 } else {

598

599

602 APInt PoisonElts(NumElts, 0);

604 SrcVec, DemandedElts, PoisonElts, 0 ,

605 true )) {

606 if (V != SrcVec) {

609 return &EI;

610 }

611 }

612 }

613 }

614 }

615 }

616 return nullptr;

617}

618

619

620

624 "Invalid CollectSingleShuffleElements");

625 unsigned NumElts = cast<FixedVectorType>(V->getType())->getNumElements();

626

628 Mask.assign(NumElts, -1);

629 return true;

630 }

631

632 if (V == LHS) {

633 for (unsigned i = 0; i != NumElts; ++i)

634 Mask.push_back(i);

635 return true;

636 }

637

638 if (V == RHS) {

639 for (unsigned i = 0; i != NumElts; ++i)

640 Mask.push_back(i + NumElts);

641 return true;

642 }

643

645

646 Value *VecOp = IEI->getOperand(0);

647 Value *ScalarOp = IEI->getOperand(1);

648 Value *IdxOp = IEI->getOperand(2);

649

650 if (!isa<ConstantInt>(IdxOp))

651 return false;

652 unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();

653

654 if (isa(ScalarOp)) {

655

656

658

659 Mask[InsertedIdx] = -1;

660 return true;

661 }

662 } else if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {

663 if (isa<ConstantInt>(EI->getOperand(1))) {

664 unsigned ExtractedIdx =

665 cast<ConstantInt>(EI->getOperand(1))->getZExtValue();

666 unsigned NumLHSElts =

667 cast<FixedVectorType>(LHS->getType())->getNumElements();

668

669

671

672

674

676 Mask[InsertedIdx % NumElts] = ExtractedIdx;

677 } else {

679 Mask[InsertedIdx % NumElts] = ExtractedIdx + NumLHSElts;

680 }

681 return true;

682 }

683 }

684 }

685 }

686 }

687

688 return false;

689}

690

691

692

693

697 auto *InsVecType = cast<FixedVectorType>(InsElt->getType());

699 unsigned NumInsElts = InsVecType->getNumElements();

700 unsigned NumExtElts = ExtVecType->getNumElements();

701

702

703 if (InsVecType->getElementType() != ExtVecType->getElementType() ||

704 NumExtElts >= NumInsElts)

705 return false;

706

707

708

709

710

712 for (unsigned i = 0; i < NumExtElts; ++i)

714 for (unsigned i = NumExtElts; i < NumInsElts; ++i)

716

718 auto *ExtVecOpInst = dyn_cast<Instruction>(ExtVecOp);

719 BasicBlock *InsertionBlock = (ExtVecOpInst && !isa<PHINode>(ExtVecOpInst))

722

723

724

725

726

727

728

729

730

731

732 if (InsertionBlock != InsElt->getParent())

733 return false;

734

735

736

737

738

739

740 if (InsElt->hasOneUse() && isa<InsertElementInst>(InsElt->user_back()))

741 return false;

742

744

745

746

747

748

749 if (ExtVecOpInst && !isa<PHINode>(ExtVecOpInst))

750 WideVec->insertAfter(ExtVecOpInst);

751 else

753

754

755

756 for (User *U : ExtVecOp->users()) {

758 if (!OldExt || OldExt->getParent() != WideVec->getParent())

759 continue;

763

764

766 }

767

768 return true;

769}

770

771

772

773

774

775

776

777

778

780

782 Value *PermittedRHS,

784 assert(V->getType()->isVectorTy() && "Invalid shuffle!");

785 unsigned NumElts = cast<FixedVectorType>(V->getType())->getNumElements();

786

788 Mask.assign(NumElts, -1);

789 return std::make_pair(

791 }

792

793 if (isa<ConstantAggregateZero>(V)) {

794 Mask.assign(NumElts, 0);

795 return std::make_pair(V, nullptr);

796 }

797

799

800 Value *VecOp = IEI->getOperand(0);

801 Value *ScalarOp = IEI->getOperand(1);

802 Value *IdxOp = IEI->getOperand(2);

803

804 if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {

805 if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp)) {

806 unsigned ExtractedIdx =

807 cast<ConstantInt>(EI->getOperand(1))->getZExtValue();

808 unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();

809

810

811

812 if (EI->getOperand(0) == PermittedRHS || PermittedRHS == nullptr) {

815 assert(LR.second == nullptr || LR.second == RHS);

816

817 if (LR.first->getType() != RHS->getType()) {

818

819

821 Rerun = true;

822

823

824

825 for (unsigned i = 0; i < NumElts; ++i)

826 Mask[i] = i;

827 return std::make_pair(V, nullptr);

828 }

829

830 unsigned NumLHSElts =

831 cast<FixedVectorType>(RHS->getType())->getNumElements();

832 Mask[InsertedIdx % NumElts] = NumLHSElts + ExtractedIdx;

833 return std::make_pair(LR.first, RHS);

834 }

835

836 if (VecOp == PermittedRHS) {

837

838

839 unsigned NumLHSElts =

841 ->getNumElements();

842 for (unsigned i = 0; i != NumElts; ++i)

843 Mask.push_back(i == InsertedIdx ? ExtractedIdx : NumLHSElts + i);

844 return std::make_pair(EI->getOperand(0), PermittedRHS);

845 }

846

847

848

851 Mask))

852 return std::make_pair(EI->getOperand(0), PermittedRHS);

853 }

854 }

855 }

856

857

858 for (unsigned i = 0; i != NumElts; ++i)

859 Mask.push_back(i);

860 return std::make_pair(V, nullptr);

861}

862

863

864

865

866

867

871 unsigned NumAggElts;

875 break;

878 break;

879 default:

881 }

882

883

884

885

886 assert(NumAggElts > 0 && "Aggregate should have elements.");

887 if (NumAggElts > 2)

888 return nullptr;

889

890 static constexpr auto NotFound = std::nullopt;

891 static constexpr auto FoundMismatch = nullptr;

892

893

894

896

897

898 auto KnowAllElts = [&AggElts]() {

900 };

901

903

904

905

906 static const int DepthLimit = 2 * NumAggElts;

907

908

909

911 Depth < DepthLimit && CurrIVI && !KnowAllElts();

912 CurrIVI = dyn_cast<InsertValueInst>(CurrIVI->getAggregateOperand()),

914 auto *InsertedValue =

915 dyn_cast<Instruction>(CurrIVI->getInsertedValueOperand());

916 if (!InsertedValue)

917 return nullptr;

918

920

921

922 if (Indices.size() != 1)

923 return nullptr;

924

925

926

927

928 std::optional<Instruction *> &Elt = AggElts[Indices.front()];

929 Elt = Elt.value_or(InsertedValue);

930

931

932 }

933

934

935 if (!KnowAllElts())

936 return nullptr;

937

938

939

940

941

942 enum class AggregateDescription {

943

944

945 NotFound,

946

947

948

949 Found,

950

951

952

953

954

955

956 FoundMismatch

957 };

958 auto Describe = [](std::optional<Value *> SourceAggregate) {

959 if (SourceAggregate == NotFound)

960 return AggregateDescription::NotFound;

961 if (*SourceAggregate == FoundMismatch)

962 return AggregateDescription::FoundMismatch;

963 return AggregateDescription::Found;

964 };

965

966

967 bool EltDefinedInUseBB = false;

968

969

970

971

972

973

974 auto FindSourceAggregate =

975 [&](Instruction *Elt, unsigned EltIdx, std::optional<BasicBlock *> UseBB,

976 std::optional<BasicBlock *> PredBB) -> std::optional<Value *> {

977

978 if (UseBB && PredBB) {

979 Elt = dyn_cast<Instruction>(Elt->DoPHITranslation(*UseBB, *PredBB));

980 if (Elt && Elt->getParent() == *UseBB)

981 EltDefinedInUseBB = true;

982 }

983

984

985

986 auto *EVI = dyn_cast_or_null<ExtractValueInst>(Elt);

987 if (!EVI)

988 return NotFound;

989

990 Value *SourceAggregate = EVI->getAggregateOperand();

991

992

993 if (SourceAggregate->getType() != AggTy)

994 return FoundMismatch;

995

996 if (EVI->getNumIndices() != 1 || EltIdx != EVI->getIndices().front())

997 return FoundMismatch;

998

999 return SourceAggregate;

1000 };

1001

1002

1003

1004

1005 auto FindCommonSourceAggregate =

1006 [&](std::optional<BasicBlock *> UseBB,

1007 std::optional<BasicBlock *> PredBB) -> std::optional<Value *> {

1008 std::optional<Value *> SourceAggregate;

1009

1011 assert(Describe(SourceAggregate) != AggregateDescription::FoundMismatch &&

1012 "We don't store nullptr in SourceAggregate!");

1013 assert((Describe(SourceAggregate) == AggregateDescription::Found) ==

1014 (I.index() != 0) &&

1015 "SourceAggregate should be valid after the first element,");

1016

1017

1018

1019

1020 std::optional<Value *> SourceAggregateForElement =

1021 FindSourceAggregate(*I.value(), I.index(), UseBB, PredBB);

1022

1023

1024

1025

1026

1027

1028 if (Describe(SourceAggregateForElement) != AggregateDescription::Found)

1029 return SourceAggregateForElement;

1030

1031

1032

1033 switch (Describe(SourceAggregate)) {

1034 case AggregateDescription::NotFound:

1035

1036 SourceAggregate = SourceAggregateForElement;

1037 continue;

1038 case AggregateDescription::Found:

1039

1040

1041 if (*SourceAggregateForElement != *SourceAggregate)

1042 return FoundMismatch;

1043 continue;

1044 case AggregateDescription::FoundMismatch:

1045 llvm_unreachable("Can't happen. We would have early-exited then.");

1046 };

1047 }

1048

1049 assert(Describe(SourceAggregate) == AggregateDescription::Found &&

1050 "Must be a valid Value");

1051 return *SourceAggregate;

1052 };

1053

1054 std::optional<Value *> SourceAggregate;

1055

1056

1057 SourceAggregate = FindCommonSourceAggregate(std::nullopt,

1058 std::nullopt);

1059 if (Describe(SourceAggregate) != AggregateDescription::NotFound) {

1060 if (Describe(SourceAggregate) == AggregateDescription::FoundMismatch)

1061 return nullptr;

1062 ++NumAggregateReconstructionsSimplified;

1064 }

1065

1066

1067

1068

1069

1070

1071

1072

1074

1075 for (const std::optional<Instruction *> &I : AggElts) {

1077

1078 if (!UseBB) {

1079 UseBB = BB;

1080 continue;

1081 }

1082

1083 if (UseBB != BB)

1084 return nullptr;

1085 }

1086

1087

1088

1089

1090 if (!UseBB)

1091 return nullptr;

1092

1093

1094

1096 return nullptr;

1097

1098

1099 static const int PredCountLimit = 64;

1100

1101

1102

1105

1106 if (Preds.size() >= PredCountLimit)

1107 return nullptr;

1109 }

1110

1111

1112

1113

1115 bool FoundSrcAgg = false;

1117 std::pair<decltype(SourceAggregates)::iterator, bool> IV =

1118 SourceAggregates.insert({Pred, nullptr});

1119

1120 if (IV.second)

1121 continue;

1122

1123

1124

1125

1126 SourceAggregate = FindCommonSourceAggregate(UseBB, Pred);

1127 if (Describe(SourceAggregate) == AggregateDescription::Found) {

1128 FoundSrcAgg = true;

1129 IV.first->second = *SourceAggregate;

1130 } else {

1131

1132

1133 auto *BI = dyn_cast<BranchInst>(Pred->getTerminator());

1134 if (!BI || !BI->isUnconditional())

1135 return nullptr;

1136 }

1137 }

1138

1139 if (!FoundSrcAgg)

1140 return nullptr;

1141

1142

1143 auto OrigBB = OrigIVI.getParent();

1144 for (auto &It : SourceAggregates) {

1145 if (Describe(It.second) == AggregateDescription::Found)

1146 continue;

1147

1148

1149 if (EltDefinedInUseBB)

1150 return nullptr;

1151

1152

1153

1154

1155

1156

1157 if (UseBB != OrigBB)

1158 return nullptr;

1159

1160

1161

1162 bool ConstAgg = true;

1163 for (auto Val : AggElts) {

1165 if (!isa(Elt)) {

1166 ConstAgg = false;

1167 break;

1168 }

1169 }

1170 if (ConstAgg)

1171 return nullptr;

1172 }

1173

1174

1175

1176 for (auto &It : SourceAggregates) {

1177 if (Describe(It.second) == AggregateDescription::Found)

1178 continue;

1179

1183 for (auto [Idx, Val] : enumerate(AggElts)) {

1185 V = Builder.CreateInsertValue(V, Elt, Idx);

1186 }

1187

1188 It.second = V;

1189 }

1190

1191

1192

1193

1194

1195

1197 Builder.SetInsertPoint(UseBB, UseBB->getFirstNonPHIIt());

1198 auto *PHI =

1199 Builder.CreatePHI(AggTy, Preds.size(), OrigIVI.getName() + ".merged");

1201 PHI->addIncoming(SourceAggregates[Pred], Pred);

1202

1203 ++NumAggregateReconstructionsSimplified;

1204 return replaceInstUsesWith(OrigIVI, PHI);

1205}

1206

1207

1208

1209

1210

1211

1212

1213

1216 I.getAggregateOperand(), I.getInsertedValueOperand(), I.getIndices(),

1219

1220 bool IsRedundant = false;

1222

1223

1224

1225

1226

1228 unsigned Depth = 0;

1229 while (V->hasOneUse() && Depth < 10) {

1230 User *U = V->user_back();

1231 auto UserInsInst = dyn_cast<InsertValueInst>(U);

1232 if (!UserInsInst || U->getOperand(0) != V)

1233 break;

1234 if (UserInsInst->getIndices() == FirstIndices) {

1235 IsRedundant = true;

1236 break;

1237 }

1238 V = UserInsInst;

1240 }

1241

1242 if (IsRedundant)

1244

1246 return NewI;

1247

1248 return nullptr;

1249}

1250

1252

1253

1255 return false;

1256

1258 int VecSize =

1259 cast<FixedVectorType>(Shuf.getOperand(0)->getType())->getNumElements();

1260

1261

1262 if (MaskSize != VecSize)

1263 return false;

1264

1265

1266

1267 for (int i = 0; i != MaskSize; ++i) {

1269 if (Elt != -1 && Elt != i && Elt != i + VecSize)

1270 return false;

1271 }

1272

1273 return true;

1274}

1275

1276

1277

1278

1280

1281

1282 if (InsElt.hasOneUse() && isa(InsElt.user_back()))

1283 return nullptr;

1284

1286

1287

1288 if (isa<ScalableVectorType>(VecTy))

1289 return nullptr;

1290 unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();

1291

1292

1293

1294 if (NumElements == 1)

1295 return nullptr;

1296

1301

1302

1303

1304 while (CurrIE) {

1305 auto *Idx = dyn_cast<ConstantInt>(CurrIE->getOperand(2));

1307 return nullptr;

1308

1309 auto *NextIE = dyn_cast<InsertElementInst>(CurrIE->getOperand(0));

1310

1311

1312

1313 if (CurrIE != &InsElt &&

1314 (!CurrIE->hasOneUse() && (NextIE != nullptr || Idx->isZero())))

1315 return nullptr;

1316

1317 ElementPresent[Idx->getZExtValue()] = true;

1318 FirstIE = CurrIE;

1319 CurrIE = NextIE;

1320 }

1321

1322

1323 if (FirstIE == &InsElt)

1324 return nullptr;

1325

1326

1327

1328

1329

1331 if (!ElementPresent.all())

1332 return nullptr;

1333

1334

1337 Constant *Zero = ConstantInt::get(Int64Ty, 0);

1338 if (!cast<ConstantInt>(FirstIE->getOperand(2))->isZero())

1341

1342

1344 for (unsigned i = 0; i != NumElements; ++i)

1345 if (!ElementPresent[i])

1346 Mask[i] = -1;

1347

1349}

1350

1351

1352

1354

1355 auto *Shuf = dyn_cast<ShuffleVectorInst>(InsElt.getOperand(0));

1356 if (!Shuf || !Shuf->isZeroEltSplat())

1357 return nullptr;

1358

1359

1360

1361 if (isa<ScalableVectorType>(Shuf->getType()))

1362 return nullptr;

1363

1364

1367 return nullptr;

1368

1369

1371 Value *Op0 = Shuf->getOperand(0);

1373 return nullptr;

1374

1375

1376

1377

1378

1379 unsigned NumMaskElts =

1380 cast<FixedVectorType>(Shuf->getType())->getNumElements();

1382 for (unsigned i = 0; i != NumMaskElts; ++i)

1383 NewMask[i] = i == IdxC ? 0 : Shuf->getMaskValue(i);

1384

1386}

1387

1388

1389

1391

1392 auto *Shuf = dyn_cast<ShuffleVectorInst>(InsElt.getOperand(0));

1393 if (!Shuf || !match(Shuf->getOperand(1), m_Poison()) ||

1394 !(Shuf->isIdentityWithExtract() || Shuf->isIdentityWithPadding()))

1395 return nullptr;

1396

1397

1398

1399 if (isa<ScalableVectorType>(Shuf->getType()))

1400 return nullptr;

1401

1402

1405 return nullptr;

1406

1407

1408

1410 Value *X = Shuf->getOperand(0);

1412 return nullptr;

1413

1414

1415

1416

1417

1418 unsigned NumMaskElts =

1419 cast<FixedVectorType>(Shuf->getType())->getNumElements();

1422 for (unsigned i = 0; i != NumMaskElts; ++i) {

1423 if (i != IdxC) {

1424

1425 NewMask[i] = OldMask[i];

1426 } else if (OldMask[i] == (int)IdxC) {

1427

1428

1429 return nullptr;

1430 } else {

1432 "Unexpected shuffle mask element for identity shuffle");

1433 NewMask[i] = IdxC;

1434 }

1435 }

1436

1438}

1439

1440

1441

1442

1443

1444

1445

1446

1447

1448

1451 auto *InsElt1 = dyn_cast<InsertElementInst>(InsElt2.getOperand(0));

1452 if (!InsElt1 || !InsElt1->hasOneUse())

1453 return nullptr;

1454

1458 if (match(InsElt1->getOperand(0), m_Value(X)) &&

1459 match(InsElt1->getOperand(1), m_Value(Y)) && !isa<Constant>(Y) &&

1465 }

1466

1467 return nullptr;

1468}

1469

1470

1471

1473 auto *Inst = dyn_cast<Instruction>(InsElt.getOperand(0));

1474

1475

1476 if (!Inst || !Inst->hasOneUse())

1477 return nullptr;

1478 if (auto *Shuf = dyn_cast<ShuffleVectorInst>(InsElt.getOperand(0))) {

1479

1480

1481 Constant *ShufConstVec, *InsEltScalar;

1483 if (!match(Shuf->getOperand(1), m_Constant(ShufConstVec)) ||

1486 return nullptr;

1487

1488

1489

1490

1491

1492

1494 return nullptr;

1495

1496

1497

1498

1499

1500

1501

1502

1503

1505 unsigned NumElts = Mask.size();

1508 for (unsigned I = 0; I != NumElts; ++I) {

1509 if (I == InsEltIndex) {

1510 NewShufElts[I] = InsEltScalar;

1511 NewMaskElts[I] = InsEltIndex + NumElts;

1512 } else {

1513

1515 NewMaskElts[I] = Mask[I];

1516 }

1517

1518

1519 if (!NewShufElts[I])

1520 return nullptr;

1521 }

1522

1523

1524

1527 } else if (auto *IEI = dyn_cast<InsertElementInst>(Inst)) {

1528

1529

1530

1531

1532 if (isa<ScalableVectorType>(InsElt.getType()))

1533 return nullptr;

1534 unsigned NumElts =

1535 cast<FixedVectorType>(InsElt.getType())->getNumElements();

1536

1543 return nullptr;

1546 auto ValI = std::begin(Val);

1547

1548

1549

1551 if (!Values[I]) {

1552 Values[I] = *ValI;

1553 Mask[I] = NumElts + I;

1554 }

1555 ++ValI;

1556 }

1557

1558 for (unsigned I = 0; I < NumElts; ++I) {

1559 if (!Values[I]) {

1561 Mask[I] = I;

1562 }

1563 }

1564

1565

1568 }

1569 return nullptr;

1570}

1571

1572

1573

1574

1575

1578

1579

1580

1581

1584 return nullptr;

1585

1590 CastOpcode = Instruction::FPExt;

1592 CastOpcode = Instruction::SExt;

1594 CastOpcode = Instruction::ZExt;

1595 else

1596 return nullptr;

1597

1598

1599 if (X->getType()->getScalarType() != Y->getType())

1600 return nullptr;

1601

1602

1605}

1606

1607

1608

1610 bool IsBigEndian,

1615

1616

1617

1618

1619

1620

1621

1622

1623

1624

1625

1626 auto *VTy = dyn_cast<FixedVectorType>(InsElt.getType());

1627 Value *Scalar0, *BaseVec;

1629 if (!VTy || (VTy->getNumElements() & 1) ||

1634 return nullptr;

1635

1636

1637

1638 if (Index0 + 1 != Index1 || Index0 & 1)

1639 return nullptr;

1640

1641

1642

1645 if (IsBigEndian) {

1648 return nullptr;

1649 } else {

1652 return nullptr;

1653 }

1654

1655 Type *SrcTy = X->getType();

1657 unsigned VecEltWidth = VTy->getScalarSizeInBits();

1658 if (ScalarWidth != VecEltWidth * 2 || ShAmt != VecEltWidth)

1659 return nullptr;

1660

1661

1664

1665

1666

1667 uint64_t NewIndex = IsBigEndian ? Index1 / 2 : Index0 / 2;

1670}

1671

1673 Value *VecOp = IE.getOperand(0);

1674 Value *ScalarOp = IE.getOperand(1);

1675 Value *IdxOp = IE.getOperand(2);

1676

1680

1681

1682 if (auto *IndexC = dyn_cast<ConstantInt>(IdxOp)) {

1685

1686 Value *BaseVec, *OtherScalar;

1691 !isa(OtherScalar) && OtherIndexVal > IndexC->getZExtValue()) {

1695 }

1696 }

1697

1698

1699

1700

1701 Value *ScalarSrc;

1706

1707

1709 Type *VecTy = VectorType::get(ScalarTy, IE.getType()->getElementCount());

1713 return new BitCastInst(NewInsElt, IE.getType());

1714 }

1715

1716

1717

1723 cast<VectorType>(VecSrc->getType())->getElementType() ==

1725

1726

1728 return new BitCastInst(NewInsElt, IE.getType());

1729 }

1730

1731

1732

1733

1734

1735 uint64_t InsertedIdx, ExtractedIdx;

1736 Value *ExtVecOp;

1737 if (isa<FixedVectorType>(IE.getType()) &&

1741 isa<FixedVectorType>(ExtVecOp->getType()) &&

1742 ExtractedIdx <

1743 cast<FixedVectorType>(ExtVecOp->getType())->getNumElements()) {

1744

1745

1746

1747

1748

1749

1750

1751

1752

1753

1754

1755

1756

1757

1759 if (!Insert.hasOneUse())

1760 return true;

1761 auto *InsertUser = dyn_cast<InsertElementInst>(Insert.user_back());

1762 if (!InsertUser)

1763 return true;

1764 return false;

1765 };

1766

1767

1768 if (isShuffleRootCandidate(IE)) {

1769 bool Rerun = true;

1770 while (Rerun) {

1771 Rerun = false;

1772

1776

1777

1778

1779 if (LR.first != &IE && LR.second != &IE) {

1780

1781 if (LR.second == nullptr)

1784 }

1785 }

1786 }

1787 }

1788

1789 if (auto VecTy = dyn_cast<FixedVectorType>(VecOp->getType())) {

1790 unsigned VWidth = VecTy->getNumElements();

1791 APInt PoisonElts(VWidth, 0);

1794 PoisonElts)) {

1795 if (V != &IE)

1797 return &IE;

1798 }

1799 }

1800

1802 return Shuf;

1803

1805 return NewInsElt;

1806

1808 return Broadcast;

1809

1812

1814 return IdentityShuf;

1815

1817 return Ext;

1818

1820 return Ext;

1821

1822 return nullptr;

1823}

1824

1825

1826

1828 unsigned Depth = 5) {

1829

1830 if (isa<Constant>(V))

1831 return true;

1832

1833

1834 Instruction *I = dyn_cast<Instruction>(V);

1835 if (!I) return false;

1836

1837

1838 if (!I->hasOneUse())

1839 return false;

1840

1841 if (Depth == 0) return false;

1842

1843 switch (I->getOpcode()) {

1844 case Instruction::UDiv:

1845 case Instruction::SDiv:

1846 case Instruction::URem:

1847 case Instruction::SRem:

1848

1849

1850

1852 return false;

1853 [[fallthrough]];

1854 case Instruction::Add:

1855 case Instruction::FAdd:

1856 case Instruction::Sub:

1857 case Instruction::FSub:

1858 case Instruction::Mul:

1859 case Instruction::FMul:

1860 case Instruction::FDiv:

1861 case Instruction::FRem:

1862 case Instruction::Shl:

1863 case Instruction::LShr:

1864 case Instruction::AShr:

1865 case Instruction::And:

1866 case Instruction::Or:

1867 case Instruction::Xor:

1868 case Instruction::ICmp:

1869 case Instruction::FCmp:

1870 case Instruction::Trunc:

1871 case Instruction::ZExt:

1872 case Instruction::SExt:

1873 case Instruction::FPToUI:

1874 case Instruction::FPToSI:

1875 case Instruction::UIToFP:

1876 case Instruction::SIToFP:

1877 case Instruction::FPTrunc:

1878 case Instruction::FPExt:

1879 case Instruction::GetElementPtr: {

1880

1881

1882 Type *ITy = I->getType();

1884 Mask.size() > cast<FixedVectorType>(ITy)->getNumElements())

1885 return false;

1886 for (Value *Operand : I->operands()) {

1888 return false;

1889 }

1890 return true;

1891 }

1892 case Instruction::InsertElement: {

1893 ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(2));

1894 if (!CI) return false;

1896

1897

1898

1899 bool SeenOnce = false;

1900 for (int I : Mask) {

1901 if (I == ElementNumber) {

1902 if (SeenOnce)

1903 return false;

1904 SeenOnce = true;

1905 }

1906 }

1908 }

1909 }

1910 return false;

1911}

1912

1913

1914

1918 switch (I->getOpcode()) {

1919 case Instruction::Add:

1920 case Instruction::FAdd:

1921 case Instruction::Sub:

1922 case Instruction::FSub:

1923 case Instruction::Mul:

1924 case Instruction::FMul:

1925 case Instruction::UDiv:

1926 case Instruction::SDiv:

1927 case Instruction::FDiv:

1928 case Instruction::URem:

1929 case Instruction::SRem:

1930 case Instruction::FRem:

1931 case Instruction::Shl:

1932 case Instruction::LShr:

1933 case Instruction::AShr:

1934 case Instruction::And:

1935 case Instruction::Or:

1936 case Instruction::Xor: {

1938 assert(NewOps.size() == 2 && "binary operator with #ops != 2");

1940 NewOps[0], NewOps[1]);

1941 if (auto *NewI = dyn_cast(New)) {

1942 if (isa(BO)) {

1945 }

1946 if (isa(BO)) {

1947 NewI->setIsExact(BO->isExact());

1948 }

1949 if (isa(BO))

1950 NewI->copyFastMathFlags(I);

1951 }

1952 return New;

1953 }

1954 case Instruction::ICmp:

1955 assert(NewOps.size() == 2 && "icmp with #ops != 2");

1956 return Builder.CreateICmp(cast<CmpInst>(I)->getPredicate(), NewOps[0],

1957 NewOps[1]);

1958 case Instruction::FCmp:

1959 assert(NewOps.size() == 2 && "fcmp with #ops != 2");

1960 return Builder.CreateFCmp(cast<CmpInst>(I)->getPredicate(), NewOps[0],

1961 NewOps[1]);

1962 case Instruction::Trunc:

1963 case Instruction::ZExt:

1964 case Instruction::SExt:

1965 case Instruction::FPToUI:

1966 case Instruction::FPToSI:

1967 case Instruction::UIToFP:

1968 case Instruction::SIToFP:

1969 case Instruction::FPTrunc:

1970 case Instruction::FPExt: {

1971

1972

1974 I->getType()->getScalarType(),

1975 cast(NewOps[0]->getType())->getElementCount());

1976 assert(NewOps.size() == 1 && "cast with #ops != 1");

1978 DestTy);

1979 }

1980 case Instruction::GetElementPtr: {

1983 return Builder.CreateGEP(cast(I)->getSourceElementType(),

1985 cast(I)->isInBounds());

1986 }

1987 }

1989}

1990

1993

1994

1995 assert(V->getType()->isVectorTy() && "can't reorder non-vector elements");

1996 Type *EltTy = V->getType()->getScalarType();

1997

1998 if (isa(V))

2000

2003

2004 if (isa<ConstantAggregateZero>(V))

2006

2007 if (Constant *C = dyn_cast<Constant>(V))

2009 Mask);

2010

2012 switch (I->getOpcode()) {

2013 case Instruction::Add:

2014 case Instruction::FAdd:

2015 case Instruction::Sub:

2016 case Instruction::FSub:

2017 case Instruction::Mul:

2018 case Instruction::FMul:

2019 case Instruction::UDiv:

2020 case Instruction::SDiv:

2021 case Instruction::FDiv:

2022 case Instruction::URem:

2023 case Instruction::SRem:

2024 case Instruction::FRem:

2025 case Instruction::Shl:

2026 case Instruction::LShr:

2027 case Instruction::AShr:

2028 case Instruction::And:

2029 case Instruction::Or:

2030 case Instruction::Xor:

2031 case Instruction::ICmp:

2032 case Instruction::FCmp:

2033 case Instruction::Trunc:

2034 case Instruction::ZExt:

2035 case Instruction::SExt:

2036 case Instruction::FPToUI:

2037 case Instruction::FPToSI:

2038 case Instruction::UIToFP:

2039 case Instruction::SIToFP:

2040 case Instruction::FPTrunc:

2041 case Instruction::FPExt:

2042 case Instruction::Select:

2043 case Instruction::GetElementPtr: {

2045 bool NeedsRebuild =

2046 (Mask.size() !=

2047 cast<FixedVectorType>(I->getType())->getNumElements());

2048 for (int i = 0, e = I->getNumOperands(); i != e; ++i) {

2050

2051

2052

2053 if (I->getOperand(i)->getType()->isVectorTy())

2055 else

2056 V = I->getOperand(i);

2058 NeedsRebuild |= (V != I->getOperand(i));

2059 }

2060 if (NeedsRebuild)

2061 return buildNew(I, NewOps, Builder);

2062 return I;

2063 }

2064 case Instruction::InsertElement: {

2065 int Element = cast<ConstantInt>(I->getOperand(2))->getLimitedValue();

2066

2067

2068

2069

2070 bool Found = false;

2071 int Index = 0;

2072 for (int e = Mask.size(); Index != e; ++Index) {

2073 if (Mask[Index] == Element) {

2074 Found = true;

2075 break;

2076 }

2077 }

2078

2079

2080

2081 if (!Found)

2083

2085 Builder);

2088 }

2089 }

2090 llvm_unreachable("failed to reorder elements of vector instruction!");

2091}

2092

2093

2094

2095

2096

2097

2098

2101 unsigned LHSElems =

2102 cast<FixedVectorType>(SVI.getOperand(0)->getType())->getNumElements();

2103 unsigned MaskElems = Mask.size();

2104 unsigned BegIdx = Mask.front();

2105 unsigned EndIdx = Mask.back();

2106 if (BegIdx > EndIdx || EndIdx >= LHSElems || EndIdx - BegIdx != MaskElems - 1)

2107 return false;

2108 for (unsigned I = 0; I != MaskElems; ++I)

2109 if (static_cast<unsigned>(Mask[I]) != BegIdx + I)

2110 return false;

2111 return true;

2112}

2113

2114

2115

2121 Value *V0 = nullptr, Value *V1 = nullptr) :

2122 Opcode(Opc), Op0(V0), Op1(V1) {}

2123 operator bool() const { return Opcode != 0; }

2124};

2125

2126

2127

2128

2129

2134 case Instruction::Shl: {

2135

2139 Instruction::Shl, ConstantInt::get(Ty, 1), C, DL);

2140 assert(ShlOne && "Constant folding of immediate constants failed");

2141 return {Instruction::Mul, BO0, ShlOne};

2142 }

2143 break;

2144 }

2145 case Instruction::Or: {

2146

2147 if (cast<PossiblyDisjointInst>(BO)->isDisjoint())

2148 return {Instruction::Add, BO0, BO1};

2149 break;

2150 }

2151 case Instruction::Sub:

2152

2155 break;

2156 default:

2157 break;

2158 }

2159 return {};

2160}

2161

2162

2163

2164

2166 assert(Shuf.isSelect() && "Must have select-equivalent shuffle");

2167

2171 unsigned NumElts = Mask.size();

2172

2173

2174 auto *ShufOp = dyn_cast<ShuffleVectorInst>(Op0);

2175 if (ShufOp && ShufOp->isSelect() &&

2176 (ShufOp->getOperand(0) == Op1 || ShufOp->getOperand(1) == Op1)) {

2179 }

2180

2181 ShufOp = dyn_cast<ShuffleVectorInst>(Op1);

2182 if (!ShufOp || !ShufOp->isSelect() ||

2183 (ShufOp->getOperand(0) != Op0 && ShufOp->getOperand(1) != Op0))

2184 return nullptr;

2185

2186 Value *X = ShufOp->getOperand(0), *Y = ShufOp->getOperand(1);

2188 ShufOp->getShuffleMask(Mask1);

2189 assert(Mask1.size() == NumElts && "Vector size changed with select shuffle");

2190

2191

2192 if (Y == Op0) {

2195 }

2196

2197

2198

2199

2200

2202 for (unsigned i = 0; i != NumElts; ++i)

2203 NewMask[i] = Mask[i] < (signed)NumElts ? Mask[i] : Mask1[i];

2204

2205

2208 "Unexpected shuffle mask");

2210}

2211

2214 assert(Shuf.isSelect() && "Must have select-equivalent shuffle");

2215

2216

2217

2220 bool Op0IsBinop;

2222 Op0IsBinop = true;

2224 Op0IsBinop = false;

2225 else

2226 return nullptr;

2227

2228

2229

2230

2231 auto *BO = cast<BinaryOperator>(Op0IsBinop ? Op0 : Op1);

2234 if (!IdC)

2235 return nullptr;

2236

2237 Value *X = Op0IsBinop ? Op1 : Op0;

2238

2239

2240

2241

2242

2243

2244

2245

2248 return nullptr;

2249

2250

2251

2252

2253

2257

2258 bool MightCreatePoisonOrUB =

2261 if (MightCreatePoisonOrUB)

2263

2264

2265

2268

2269

2270

2271

2274 return NewBO;

2275}

2276

2277

2278

2279

2280

2287

2288

2292 return nullptr;

2293

2294

2297

2298

2299

2300

2301

2302 unsigned NumMaskElts =

2303 cast(Shuf.getType())->getNumElements();

2305 for (unsigned i = 0; i != NumMaskElts; ++i)

2307 NewMask[i] = Mask[i];

2308

2310}

2311

2312

2315 return nullptr;

2316

2317

2318

2319 unsigned NumElts = cast<FixedVectorType>(Shuf.getType())->getNumElements();

2322

2323

2325 return &Shuf;

2326 }

2327

2329 return I;

2330

2333 return I;

2334

2338 return nullptr;

2339

2340

2341

2342

2344 Constant *C0 = nullptr, *C1 = nullptr;

2345 bool ConstantsAreOp1;

2348 ConstantsAreOp1 = false;

2353 ConstantsAreOp1 = true;

2354 else

2355 return nullptr;

2356

2357

2360 bool DropNSW = false;

2361 if (ConstantsAreOp1 && Opc0 != Opc1) {

2362

2363

2364

2365 if (Opc0 == Instruction::Shl || Opc1 == Instruction::Shl)

2366 DropNSW = true;

2368 assert(isa(AltB0.Op1) && "Expecting constant with alt binop");

2369 Opc0 = AltB0.Opcode;

2370 C0 = cast(AltB0.Op1);

2372 assert(isa(AltB1.Op1) && "Expecting constant with alt binop");

2373 Opc1 = AltB1.Opcode;

2374 C1 = cast(AltB1.Op1);

2375 }

2376 }

2377

2378 if (Opc0 != Opc1 || !C0 || !C1)

2379 return nullptr;

2380

2381

2383

2384

2387

2388

2389

2390

2391 bool MightCreatePoisonOrUB =

2394 if (MightCreatePoisonOrUB)

2396 ConstantsAreOp1);

2397

2399 if (X == Y) {

2400

2401

2402

2403 V = X;

2404 } else {

2405

2406

2407

2409 return nullptr;

2410

2411

2412

2413

2414

2415

2416

2417 if (MightCreatePoisonOrUB && !ConstantsAreOp1)

2418 return nullptr;

2419

2420

2421

2422

2423

2424

2425

2426

2428 }

2429

2432

2433

2434

2435

2436

2437

2438 if (auto *NewI = dyn_cast(NewBO)) {

2439 NewI->copyIRFlags(B0);

2440 NewI->andIRFlags(B1);

2441 if (DropNSW)

2442 NewI->setHasNoSignedWrap(false);

2444 NewI->dropPoisonGeneratingFlags();

2445 }

2447}

2448

2449

2450

2451

2453 bool IsBigEndian) {

2454

2459 return nullptr;

2460

2461

2462

2463 Type *SrcType = X->getType();

2465 cast<FixedVectorType>(SrcType)->getNumElements() !=

2466 cast<FixedVectorType>(DestType)->getNumElements() ||

2468 return nullptr;

2469

2471 "Expected a shuffle that decreases length");

2472

2473

2474

2478 for (unsigned i = 0, e = Mask.size(); i != e; ++i) {

2480 continue;

2481 uint64_t LSBIndex = IsBigEndian ? (i + 1) * TruncRatio - 1 : i * TruncRatio;

2482 assert(LSBIndex <= INT32_MAX && "Overflowed 32-bits");

2483 if (Mask[i] != (int)LSBIndex)

2484 return nullptr;

2485 }

2486

2488}

2489

2490

2491

2492

2495

2496

2498 return nullptr;

2499

2500

2501

2505 return nullptr;

2506

2507

2508

2509 unsigned NarrowNumElts =

2510 cast<FixedVectorType>(Shuf.getType())->getNumElements();

2511 Value *NarrowCond;

2513 cast<FixedVectorType>(NarrowCond->getType())->getNumElements() !=

2514 NarrowNumElts ||

2515 !cast<ShuffleVectorInst>(Cond)->isIdentityWithPadding())

2516 return nullptr;

2517

2518

2519

2520

2524}

2525

2526

2529 auto *S0 = dyn_cast(Shuf.getOperand(0));

2532 return nullptr;

2533

2534 bool IsFNeg = S0->getOpcode() == Instruction::FNeg;

2535

2536

2537

2540 if (IsFNeg)

2542

2547 return NewF;

2548 }

2549

2550

2551 auto *S1 = dyn_cast(Shuf.getOperand(1));

2554 S0->getOpcode() != S1->getOpcode() ||

2555 (!S0->hasOneUse() && S1->hasOneUse()))

2556 return nullptr;

2557

2558

2561 if (IsFNeg) {

2562 NewF = UnaryOperator::CreateFNeg(NewShuf);

2563 } else {

2567 }

2570 return NewF;

2571}

2572

2573

2576

2577 auto *Cast0 = dyn_cast<CastInst>(Shuf.getOperand(0));

2578 auto *Cast1 = dyn_cast<CastInst>(Shuf.getOperand(1));

2579 if (!Cast0 || !Cast1 || Cast0->getOpcode() != Cast1->getOpcode() ||

2580 Cast0->getSrcTy() != Cast1->getSrcTy())

2581 return nullptr;

2582

2583

2584

2586 switch (CastOpcode) {

2587 case Instruction::FPToSI:

2588 case Instruction::FPToUI:

2589 case Instruction::SIToFP:

2590 case Instruction::UIToFP:

2591 break;

2592 default:

2593 return nullptr;

2594 }

2595

2598 VectorType *CastSrcTy = cast<VectorType>(Cast0->getSrcTy());

2599

2600

2601 if (ShufTy->getElementCount().getKnownMinValue() >

2602 ShufOpTy->getElementCount().getKnownMinValue())

2603 return nullptr;

2604

2605

2606 assert(isa(CastSrcTy) && isa(ShufOpTy) &&

2607 "Expected fixed vector operands for casts and binary shuffle");

2608 if (CastSrcTy->getPrimitiveSizeInBits() > ShufOpTy->getPrimitiveSizeInBits())

2609 return nullptr;

2610

2611

2612 if (!Cast0->hasOneUse() && !Cast1->hasOneUse())

2613 return nullptr;

2614

2615

2616 Value *X = Cast0->getOperand(0);

2617 Value *Y = Cast1->getOperand(0);

2620}

2621

2622

2626 return nullptr;

2627

2628

2629

2632 X->getType()->getPrimitiveSizeInBits() ==

2635

2636

2640 return nullptr;

2641

2642

2643

2645 return nullptr;

2646

2647

2648

2649

2650

2651

2652

2653

2654

2655

2656

2657

2658 unsigned NumElts = cast<FixedVectorType>(Shuf.getType())->getNumElements();

2660 assert(NumElts < Mask.size() &&

2661 "Identity with extract must have less elements than its inputs");

2662

2663 for (unsigned i = 0; i != NumElts; ++i) {

2665 int MaskElt = Mask[i];

2666 NewMask[i] = ExtractMaskElt == PoisonMaskElem ? ExtractMaskElt : MaskElt;

2667 }

2669}

2670

2671

2672

2678

2679 int NumElts = Mask.size();

2680 int InpNumElts = cast(V0->getType())->getNumElements();

2681

2682

2683

2684

2685

2686

2690

2693 }

2695

2696

2697 IdxC += InpNumElts;

2698

2701 }

2702

2703

2704

2705 if (NumElts != InpNumElts)

2706 return nullptr;

2707

2708

2709 auto isShufflingScalarIntoOp1 = [&](Value *&Scalar, ConstantInt *&IndexC) {

2710

2713 return false;

2714

2715

2716

2717 int NewInsIndex = -1;

2718 for (int i = 0; i != NumElts; ++i) {

2719

2720 if (Mask[i] == -1)

2721 continue;

2722

2723

2724 if (Mask[i] == NumElts + i)

2725 continue;

2726

2727

2728 if (NewInsIndex != -1 || Mask[i] != IndexC->getSExtValue())

2729 return false;

2730

2731

2732 NewInsIndex = i;

2733 }

2734

2735 assert(NewInsIndex != -1 && "Did not fold shuffle with unused operand?");

2736

2737

2738 IndexC = ConstantInt::get(IndexC->getIntegerType(), NewInsIndex);

2739 return true;

2740 };

2741

2742

2743

2744

2747 if (isShufflingScalarIntoOp1(Scalar, IndexC))

2749

2750

2751

2752

2755 if (isShufflingScalarIntoOp1(Scalar, IndexC))

2757

2758 return nullptr;

2759}

2760

2762

2763

2764

2765 auto *Shuffle0 = dyn_cast<ShuffleVectorInst>(Shuf.getOperand(0));

2766 auto *Shuffle1 = dyn_cast<ShuffleVectorInst>(Shuf.getOperand(1));

2767 if (!Shuffle0 || !Shuffle0->isIdentityWithPadding() ||

2768 !Shuffle1 || !Shuffle1->isIdentityWithPadding())

2769 return nullptr;

2770

2771

2772

2773

2774

2775

2776 Value *X = Shuffle0->getOperand(0);

2777 Value *Y = Shuffle1->getOperand(0);

2778 if (X->getType() != Y->getType() ||

2781 cast<FixedVectorType>(Shuffle0->getType())->getNumElements()) ||

2782 !isPowerOf2_32(cast<FixedVectorType>(X->getType())->getNumElements()) ||

2784 return nullptr;

2786 match(Shuffle1->getOperand(1), m_Undef()) &&

2787 "Unexpected operand for identity shuffle");

2788

2789

2790

2791

2792

2793 int NarrowElts = cast<FixedVectorType>(X->getType())->getNumElements();

2794 int WideElts = cast<FixedVectorType>(Shuffle0->getType())->getNumElements();

2795 assert(WideElts > NarrowElts && "Unexpected types for identity with padding");

2796

2799 for (int i = 0, e = Mask.size(); i != e; ++i) {

2800 if (Mask[i] == -1)

2801 continue;

2802

2803

2804

2805 if (Mask[i] < WideElts) {

2806 if (Shuffle0->getMaskValue(Mask[i]) == -1)

2807 continue;

2808 } else {

2809 if (Shuffle1->getMaskValue(Mask[i] - WideElts) == -1)

2810 continue;

2811 }

2812

2813

2814

2815

2816 if (Mask[i] < WideElts) {

2817 assert(Mask[i] < NarrowElts && "Unexpected shuffle mask");

2818 NewMask[i] = Mask[i];

2819 } else {

2820 assert(Mask[i] < (WideElts + NarrowElts) && "Unexpected shuffle mask");

2821 NewMask[i] = Mask[i] - (WideElts - NarrowElts);

2822 }

2823 }

2825}

2826

2827

2828

2829

2834 return nullptr;

2835

2842 return nullptr;

2843 if (X->getType() != Y->getType())

2844 return nullptr;

2845

2846 auto *BinOp = cast<BinaryOperator>(Op0);

2848 return nullptr;

2849

2851 if (auto NewBOI = dyn_cast<Instruction>(NewBO))

2852 NewBOI->copyIRFlags(BinOp);

2853

2855}

2856

2862 SVI.getType(), ShufQuery))

2864

2866 return I;

2867

2868

2869

2872

2873 if (isa<ScalableVectorType>(LHS->getType()))

2874 return nullptr;

2875

2876 unsigned VWidth = cast<FixedVectorType>(SVI.getType())->getNumElements();

2877 unsigned LHSWidth = cast<FixedVectorType>(LHS->getType())->getNumElements();

2878

2879

2880

2881

2882

2883

2884

2885

2888 X->getType()->isVectorTy() && X->getType() == Y->getType() &&

2889 X->getType()->getScalarSizeInBits() ==

2893 SVI.getName() + ".uncasted");

2895 }

2896

2898

2899

2900

2901

2902

2903

2904

2906 X->getType()->isVectorTy() && VWidth == LHSWidth) {

2907

2908 auto *XType = cast<FixedVectorType>(X->getType());

2909 unsigned XNumElts = XType->getNumElements();

2912

2913

2915 ScaledMask, XType, ShufQuery))

2917 }

2918 }

2919

2920

2923 "Shuffle with 2 undef ops not simplified?");

2925 }

2926

2927

2930 return &SVI;

2931 }

2932

2934 return I;

2935

2937 return I;

2938

2940 return I;

2941

2943 return I;

2944

2946 return I;

2947

2949 return I;

2950

2951 APInt PoisonElts(VWidth, 0);

2954 if (V != &SVI)

2956 return &SVI;

2957 }

2958

2960 return I;

2961

2962

2963

2965 return I;

2967 return I;

2968

2970 if (auto *SI = dyn_cast<SelectInst>(LHS)) {

2971

2972

2973 if (SI->getCondition()->getType()->isIntegerTy() &&

2974 (isa(RHS) ||

2977 return I;

2978 }

2979 }

2980 if (auto *PN = dyn_cast<PHINode>(LHS)) {

2982 return I;

2983 }

2984 }

2985

2989 }

2990

2991

2992

2993

2994

2995

2996

2997

2998

2999

3000

3001

3002

3003

3004

3005

3006

3007

3008

3009

3010

3011

3012

3013

3014

3015

3016

3017

3018

3019

3020 bool MadeChange = false;

3023 unsigned MaskElems = Mask.size();

3024 auto *SrcTy = cast<FixedVectorType>(V->getType());

3025 unsigned VecBitWidth = SrcTy->getPrimitiveSizeInBits().getFixedValue();

3026 unsigned SrcElemBitWidth = DL.getTypeSizeInBits(SrcTy->getElementType());

3027 assert(SrcElemBitWidth && "vector elements must have a bitwidth");

3028 unsigned SrcNumElems = SrcTy->getNumElements();

3032 if (BitCastInst *BC = dyn_cast<BitCastInst>(U))

3033 if (!BC->use_empty())

3034

3037 unsigned BegIdx = Mask.front();

3038 Type *TgtTy = BC->getDestTy();

3040 if (!TgtElemBitWidth)

3041 continue;

3042 unsigned TgtNumElems = VecBitWidth / TgtElemBitWidth;

3043 bool VecBitWidthsEqual = VecBitWidth == TgtNumElems * TgtElemBitWidth;

3044 bool BegIsAligned = 0 == ((SrcElemBitWidth * BegIdx) % TgtElemBitWidth);

3045 if (!VecBitWidthsEqual)

3046 continue;

3048 continue;

3050 if (!BegIsAligned) {

3051

3052

3054 for (unsigned I = 0, E = MaskElems, Idx = BegIdx; I != E; ++Idx, ++I)

3055 ShuffleMask[I] = Idx;

3057 SVI.getName() + ".extract");

3058 BegIdx = 0;

3059 }

3060 unsigned SrcElemsPerTgtElem = TgtElemBitWidth / SrcElemBitWidth;

3061 assert(SrcElemsPerTgtElem);

3062 BegIdx /= SrcElemsPerTgtElem;

3063 bool BCAlreadyExists = NewBCs.contains(CastSrcTy);

3064 auto *NewBC =

3065 BCAlreadyExists

3066 ? NewBCs[CastSrcTy]

3068 if (!BCAlreadyExists)

3069 NewBCs[CastSrcTy] = NewBC;

3071 SVI.getName() + ".extract");

3072

3073

3075 MadeChange = true;

3076 }

3077 }

3078

3079

3080

3081

3082

3083

3084

3085

3086

3087

3088

3089

3090

3091

3092

3093

3094

3095

3096

3097

3098

3099

3100

3101

3102

3103

3104

3105

3106

3107

3108

3109

3110

3111

3112

3113

3114

3115

3116

3117

3118

3119

3120

3121

3124 if (LHSShuffle)

3127 LHSShuffle = nullptr;

3128 if (RHSShuffle)

3130 RHSShuffle = nullptr;

3131 if (!LHSShuffle && !RHSShuffle)

3132 return MadeChange ? &SVI : nullptr;

3133

3134 Value* LHSOp0 = nullptr;

3135 Value* LHSOp1 = nullptr;

3136 Value* RHSOp0 = nullptr;

3137 unsigned LHSOp0Width = 0;

3138 unsigned RHSOp0Width = 0;

3139 if (LHSShuffle) {

3142 LHSOp0Width = cast(LHSOp0->getType())->getNumElements();

3143 }

3144 if (RHSShuffle) {

3146 RHSOp0Width = cast(RHSOp0->getType())->getNumElements();

3147 }

3150 if (LHSShuffle) {

3151

3153 newLHS = LHSOp0;

3154 newRHS = LHSOp1;

3155 }

3156

3157 else if (LHSOp0Width == LHSWidth) {

3158 newLHS = LHSOp0;

3159 }

3160 }

3161

3162 if (RHSShuffle && RHSOp0Width == LHSWidth) {

3163 newRHS = RHSOp0;

3164 }

3165

3166 if (LHSOp0 == RHSOp0) {

3167 newLHS = LHSOp0;

3168 newRHS = nullptr;

3169 }

3170

3171 if (newLHS == LHS && newRHS == RHS)

3172 return MadeChange ? &SVI : nullptr;

3173

3176 if (newLHS != LHS)

3178 if (RHSShuffle && newRHS != RHS)

3180

3181 unsigned newLHSWidth = (newLHS != LHS) ? LHSOp0Width : LHSWidth;

3184 int SplatElt = -1;

3185

3186

3187 for (unsigned i = 0; i < VWidth; ++i) {

3188 int eltMask;

3189 if (Mask[i] < 0) {

3190

3191 eltMask = -1;

3192 } else if (Mask[i] < (int)LHSWidth) {

3193

3194

3195

3196

3197 if (newLHS != LHS) {

3198 eltMask = LHSMask[Mask[i]];

3199

3200

3201 if (eltMask >= (int)LHSOp0Width && isa(LHSOp1))

3202 eltMask = -1;

3203 } else

3204 eltMask = Mask[i];

3205 } else {

3206

3207

3208

3209

3211 eltMask = -1;

3212

3213

3214 else if (newRHS != RHS) {

3215 eltMask = RHSMask[Mask[i]-LHSWidth];

3216

3217

3218 if (eltMask >= (int)RHSOp0Width) {

3220 "should have been check above");

3221 eltMask = -1;

3222 }

3223 } else

3224 eltMask = Mask[i]-LHSWidth;

3225

3226

3227

3228

3229

3230

3231

3232 if (eltMask >= 0 && newRHS != nullptr && newLHS != newRHS)

3233 eltMask += newLHSWidth;

3234 }

3235

3236

3237 if (eltMask >= 0) {

3238 if (SplatElt >= 0 && SplatElt != eltMask)

3240 SplatElt = eltMask;

3241 }

3242

3244 }

3245

3246

3247

3248 if (isSplat || newMask == LHSMask || newMask == RHSMask || newMask == Mask) {

3249 if (!newRHS)

3252 }

3253

3254 return MadeChange ? &SVI : nullptr;

3255}

This file implements a class to represent arbitrary precision integral constant values and operations...

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

This file contains the declarations for the subclasses of Constant, which represent the different fla...

Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx

This file defines the DenseMap class.

static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")

This file provides internal interfaces used to implement the InstCombine.

static Instruction * foldConstantInsEltIntoShuffle(InsertElementInst &InsElt)

insertelt (shufflevector X, CVec, Mask|insertelt X, C1, CIndex1), C, CIndex --> shufflevector X,...
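
A minimal IR sketch of the shufflevector form of this fold (element count, constants, and lane indices are illustrative; the one-use precondition is elided):

  %s = shufflevector <4 x i32> %x, <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
  %r = insertelement <4 x i32> %s, i32 42, i64 2
  ; --> fold the inserted constant into the shuffle's constant operand
  %r = shufflevector <4 x i32> %x, <4 x i32> <i32 1, i32 2, i32 42, i32 4>, <4 x i32> <i32 0, i32 5, i32 6, i32 7>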

static Value * evaluateInDifferentElementOrder(Value *V, ArrayRef< int > Mask, IRBuilderBase &Builder)

static bool collectSingleShuffleElements(Value *V, Value *LHS, Value *RHS, SmallVectorImpl< int > &Mask)

If V is a shuffle of values that ONLY returns elements from either LHS or RHS, return the shuffle mas...

static ShuffleOps collectShuffleElements(Value *V, SmallVectorImpl< int > &Mask, Value *PermittedRHS, InstCombinerImpl &IC, bool &Rerun)

static APInt findDemandedEltsByAllUsers(Value *V)

Find union of elements of V demanded by all its users.

static Instruction * foldTruncInsEltPair(InsertElementInst &InsElt, bool IsBigEndian, InstCombiner::BuilderTy &Builder)

If we are inserting 2 halves of a value into adjacent elements of a vector, try to convert to a singl...
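
A hedged sketch of the idea on a little-endian target (value names and widths are made up; big-endian swaps which half lands in which lane):

  %lo    = trunc i64 %x to i32
  %shift = lshr i64 %x, 32
  %hi    = trunc i64 %shift to i32
  %v0    = insertelement <4 x i32> %base, i32 %lo, i64 0
  %v1    = insertelement <4 x i32> %v0, i32 %hi, i64 1
  ; --> insert the whole scalar once through a bitcast of the base vector
  %bcb   = bitcast <4 x i32> %base to <2 x i64>
  %ins   = insertelement <2 x i64> %bcb, i64 %x, i64 0
  %v1    = bitcast <2 x i64> %ins to <4 x i32>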

static Instruction * foldSelectShuffleWith1Binop(ShuffleVectorInst &Shuf, const SimplifyQuery &SQ)
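
An illustrative sketch (assuming the usual poison/UB safety checks pass): when a select shuffle blends a binop of X against X itself, the lanes that pick plain X can instead pick the binop's identity constant, leaving a single binop:

  %b = add <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
  %r = shufflevector <4 x i32> %b, <4 x i32> %x, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  ; --> lanes that selected %x now use the add identity (0)
  %r = add <4 x i32> %x, <i32 1, i32 0, i32 3, i32 0>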

static Instruction * foldIdentityPaddedShuffles(ShuffleVectorInst &Shuf)

static Instruction * foldIdentityExtractShuffle(ShuffleVectorInst &Shuf)

Try to fold an extract subvector operation.
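
A sketch of the extract-subvector case (widths and masks are illustrative; the inner shuffle must have one use):

  %wide = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
  %sub  = shufflevector <8 x i32> %wide, <8 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ; --> the extracting shuffle is folded into the operand shuffle
  %sub  = shufflevector <8 x i32> %x, <8 x i32> %y, <4 x i32> <i32 0, i32 8, i32 1, i32 9>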

static Instruction * foldInsEltIntoSplat(InsertElementInst &InsElt)

Try to fold an insert element into an existing splat shuffle by changing the shuffle's mask to includ...
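
Illustrative IR (the inserted scalar must be element 0 of the same source that is being splatted):

  %splat = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 0, i32 0, i32 0, i32 poison>
  %e     = extractelement <4 x i32> %x, i64 0
  %r     = insertelement <4 x i32> %splat, i32 %e, i64 3
  ; --> only the splat mask changes
  %r     = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> zeroinitializer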

std::pair< Value *, Value * > ShuffleOps

We are building a shuffle to create V, which is a sequence of insertelement, extractelement pairs.
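
For example (a hedged sketch; the real code also tracks a permitted RHS and may rerun the collection):

  %e0 = extractelement <4 x float> %a, i64 0
  %i0 = insertelement <4 x float> poison, float %e0, i64 0
  %e1 = extractelement <4 x float> %b, i64 1
  %i1 = insertelement <4 x float> %i0, float %e1, i64 1
  ; --> the whole chain becomes one shuffle of the two source vectors
  %i1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 poison, i32 poison>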

static Instruction * foldShuffleWithInsert(ShuffleVectorInst &Shuf, InstCombinerImpl &IC)

Try to replace a shuffle with an insertelement or try to replace a shuffle operand with the operand o...
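
One shape this covers, sketched with made-up values:

  %ins = insertelement <4 x i32> poison, i32 %s, i64 0
  %r   = shufflevector <4 x i32> %v, <4 x i32> %ins, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
  ; --> the shuffle only routes %s into a single lane of %v
  %r   = insertelement <4 x i32> %v, i32 %s, i64 3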

static Instruction * canonicalizeInsertSplat(ShuffleVectorInst &Shuf, InstCombiner::BuilderTy &Builder)

If we have an insert of a scalar to a non-zero element of an undefined vector and then shuffle that v...
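
Sketch of the canonicalization (illustrative types; the insert must feed only the shuffle):

  %i = insertelement <4 x i32> poison, i32 %x, i64 3
  %s = shufflevector <4 x i32> %i, <4 x i32> poison, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
  ; --> canonical splat: insert at element 0 and splat with a zero mask
  %i0 = insertelement <4 x i32> poison, i32 %x, i64 0
  %s  = shufflevector <4 x i32> %i0, <4 x i32> poison, <4 x i32> zeroinitializer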

static Instruction * foldTruncShuffle(ShuffleVectorInst &Shuf, bool IsBigEndian)

Convert a narrowing shuffle of a bitcasted vector into a vector truncate.
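
For example, on a little-endian target (the mask must pick the low sub-element of each wide lane; big-endian picks the high ones):

  %bc = bitcast <2 x i64> %x to <4 x i32>
  %r  = shufflevector <4 x i32> %bc, <4 x i32> poison, <2 x i32> <i32 0, i32 2>
  ; --> the kept lanes are exactly the low halves of %x
  %r  = trunc <2 x i64> %x to <2 x i32>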

static bool replaceExtractElements(InsertElementInst *InsElt, ExtractElementInst *ExtElt, InstCombinerImpl &IC)

If we have insertion into a vector that is wider than the vector that we are extracting from,...
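
Roughly, with invented types: the narrow source is widened once so that each extract/insert pair can later fold into a shuffle:

  %e = extractelement <2 x float> %small, i64 1
  %i = insertelement <4 x float> %big, float %e, i64 0
  ; --> extract from a widened copy of %small instead
  %wide = shufflevector <2 x float> %small, <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
  %e    = extractelement <4 x float> %wide, i64 1
  %i    = insertelement <4 x float> %big, float %e, i64 0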

static bool cheapToScalarize(Value *V, Value *EI)

Return true if the value is cheaper to scalarize than it is to leave as a vector operation.
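
The kind of scalarization this check gates, sketched with assumed one-use operands:

  %v = add <4 x i32> %a, %b
  %e = extractelement <4 x i32> %v, i64 1
  ; --> extract the operands and do the arithmetic on scalars
  %a1 = extractelement <4 x i32> %a, i64 1
  %b1 = extractelement <4 x i32> %b, i64 1
  %e  = add i32 %a1, %b1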

static Value * buildNew(Instruction *I, ArrayRef< Value * > NewOps, IRBuilderBase &Builder)

Rebuild a new instruction just like 'I' but with the new operands given.

static bool canEvaluateShuffled(Value *V, ArrayRef< int > Mask, unsigned Depth=5)

Return true if we can evaluate the specified expression tree if the vector elements were shuffled in ...

static Instruction * foldSelectShuffleOfSelectShuffle(ShuffleVectorInst &Shuf)

A select shuffle of a select shuffle with a shared operand can be reduced to a single select shuffle.
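
Illustrative masks (both shuffles must be select shuffles sharing the %y operand):

  %s1 = shufflevector <4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  %s2 = shufflevector <4 x i32> %s1, <4 x i32> %y, <4 x i32> <i32 0, i32 1, i32 6, i32 3>
  ; --> lanes taken from %y at either step collapse into one select mask
  %s2 = shufflevector <4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 0, i32 5, i32 6, i32 7>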

static Instruction * hoistInsEltConst(InsertElementInst &InsElt2, InstCombiner::BuilderTy &Builder)

If we have an insertelement instruction feeding into another insertelement and the 2nd is inserting a...
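
A small sketch (the non-constant insert must have one use and a different index):

  %i1 = insertelement <4 x i32> %x, i32 %y, i64 0
  %i2 = insertelement <4 x i32> %i1, i32 7, i64 1
  ; --> hoist the constant insert so it can fold with a constant base vector
  %h  = insertelement <4 x i32> %x, i32 7, i64 1
  %i2 = insertelement <4 x i32> %h, i32 %y, i64 0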

static APInt findDemandedEltsBySingleUser(Value *V, Instruction *UserInstr)

Find elements of V demanded by UserInstr.

static Instruction * foldShuffleOfUnaryOps(ShuffleVectorInst &Shuf, InstCombiner::BuilderTy &Builder)

Canonicalize FP negate/abs after shuffle.
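
For the single-operand fneg case, roughly:

  %n = fneg <4 x float> %x
  %r = shufflevector <4 x float> %n, <4 x float> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  ; --> shuffle first, then negate the shuffled vector
  %s = shufflevector <4 x float> %x, <4 x float> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  %r = fneg <4 x float> %s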

static Instruction * foldCastShuffle(ShuffleVectorInst &Shuf, InstCombiner::BuilderTy &Builder)

Canonicalize casts after shuffle.

static Instruction * narrowInsElt(InsertElementInst &InsElt, InstCombiner::BuilderTy &Builder)

If both the base vector and the inserted element are extended from the same type, do the insert eleme...
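
Sketch (both extensions must use the same cast opcode and the same narrow scalar type):

  %v = zext <4 x i16> %a to <4 x i32>
  %s = zext i16 %b to i32
  %r = insertelement <4 x i32> %v, i32 %s, i64 1
  ; --> insert in the narrow type and extend the result once
  %n = insertelement <4 x i16> %a, i16 %b, i64 1
  %r = zext <4 x i16> %n to <4 x i32>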

static bool isShuffleEquivalentToSelect(ShuffleVectorInst &Shuf)

static Instruction * foldInsSequenceIntoSplat(InsertElementInst &InsElt)

Turn a chain of inserts that splats a value into an insert + shuffle: insertelt(insertelt(insertelt(i...
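
For instance (every lane must be written with the same scalar):

  %i0 = insertelement <4 x float> poison, float %x, i64 0
  %i1 = insertelement <4 x float> %i0, float %x, i64 1
  %i2 = insertelement <4 x float> %i1, float %x, i64 2
  %i3 = insertelement <4 x float> %i2, float %x, i64 3
  ; --> one insert plus a zero-mask (splat) shuffle
  %i0 = insertelement <4 x float> poison, float %x, i64 0
  %i3 = shufflevector <4 x float> %i0, <4 x float> poison, <4 x i32> zeroinitializer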

static Instruction * foldInsEltIntoIdentityShuffle(InsertElementInst &InsElt)

Try to fold an extract+insert element into an existing identity shuffle by changing the shuffle's mas...
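
Illustrative IR for the identity-with-padding case (the inserted scalar must be extracted from the same source at the same index):

  %pad = shufflevector <2 x i32> %x, <2 x i32> poison, <4 x i32> <i32 0, i32 poison, i32 poison, i32 poison>
  %e   = extractelement <2 x i32> %x, i64 1
  %r   = insertelement <4 x i32> %pad, i32 %e, i64 1
  ; --> only the shuffle mask changes
  %r   = shufflevector <2 x i32> %x, <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>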

static ConstantInt * getPreferredVectorIndex(ConstantInt *IndexC)

Given a constant index for a extractelement or insertelement instruction, return it with the canonica...

static bool isShuffleExtractingFromLHS(ShuffleVectorInst &SVI, ArrayRef< int > Mask)

static BinopElts getAlternateBinop(BinaryOperator *BO, const DataLayout &DL)

Binops may be transformed into binops with different opcodes and operands.
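
Two equivalences it recognizes, sketched on made-up values:

  %m = shl <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>   ; equivalent to mul %x, 8
  %a = or disjoint <4 x i32> %x, %y                     ; equivalent to add %x, %y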

This file provides the interface for the instcombine pass implementation.

static bool isSplat(Value *V)

Return true if V is a splat of a value (which is used when multiplying a matrix with a scalar).

uint64_t IntrinsicInst * II

static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")

const SmallVectorImpl< MachineOperand > & Cond

assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())

This file implements the SmallBitVector class.

This file defines the SmallVector class.

This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...

#define STATISTIC(VARNAME, DESC)

static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)

Returns the opcode of Values or ~0 if they do not all agree.

static SDValue narrowVectorSelect(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, const X86Subtarget &Subtarget)

If both arms of a vector select are concatenated vectors, split the select, and concatenate the resul...

static const uint32_t IV[8]

Class for arbitrary precision integers.

static APInt getAllOnes(unsigned numBits)

Return an APInt of a specified width with all bits set.

APInt zextOrTrunc(unsigned width) const

Zero extend or truncate to width.

unsigned getActiveBits() const

Compute the number of active bits in the value.

void setBit(unsigned BitPosition)

Set the given bit to 1 whose position is given as "bitPosition".

bool isAllOnes() const

Determine if all bits are set. This is true for zero-width values.

bool ult(const APInt &RHS) const

Unsigned less than comparison.

static APInt getOneBitSet(unsigned numBits, unsigned BitNo)

Return an APInt with exactly one bit set in the result.

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...

const T & front() const

front - Get the first element.

size_t size() const

size - Get the array size.

ArrayRef< T > slice(size_t N, size_t M) const

slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.

LLVM Basic Block Representation.

const_iterator getFirstInsertionPt() const

Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...

const Function * getParent() const

Return the enclosing method, or null if none.

InstListType::iterator iterator

Instruction iterators...

const Instruction * getTerminator() const LLVM_READONLY

Returns the terminator instruction if the block is well formed or null if the block is not well forme...

BinaryOps getOpcode() const

static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)

Construct a binary instruction, given the opcode and the two operands.

static BinaryOperator * CreateWithCopiedFlags(BinaryOps Opc, Value *V1, Value *V2, Value *CopyO, const Twine &Name="", InsertPosition InsertBefore=nullptr)

This class represents a no-op cast from one type to another.

This class represents a function call, abstracting a target machine's calling convention.

static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...

This class is the base class for the comparison instructions.

static CmpInst * CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Instruction *FlagsSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Construct a compare instruction, given the opcode, the predicate, the two operands and the instructio...

OtherOps getOpcode() const

Get the opcode casted to the right type.

An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...

static ConstantAggregateZero * get(Type *Ty)

static Constant * getShuffleVector(Constant *V1, Constant *V2, ArrayRef< int > Mask, Type *OnlyIfReducedTy=nullptr)

static Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)

Return the identity constant for a binary opcode.
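
For instance (a sketch with arbitrary types, not code from this file), the identity constant depends on the opcode:

#include "llvm/IR/Constants.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Type.h"
using namespace llvm;

void identityDemo(LLVMContext &Ctx) {
  Type *I32 = Type::getInt32Ty(Ctx);
  Constant *AddId = ConstantExpr::getBinOpIdentity(Instruction::Add, I32); // 0
  Constant *MulId = ConstantExpr::getBinOpIdentity(Instruction::Mul, I32); // 1
  Constant *AndId = ConstantExpr::getBinOpIdentity(Instruction::And, I32); // -1 (all ones)
  (void)AddId; (void)MulId; (void)AndId;
}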

This is the shared class of boolean and integer constants.

uint64_t getLimitedValue(uint64_t Limit=~0ULL) const

getLimitedValue - If the value is smaller than the specified limit, return it, otherwise return the l...

unsigned getBitWidth() const

getBitWidth - Return the scalar bitwidth of this constant.

uint64_t getZExtValue() const

Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...

const APInt & getValue() const

Return the constant as an APInt value reference.

static Constant * get(ArrayRef< Constant * > V)

This is an important base class in LLVM.

static Constant * getAllOnesValue(Type *Ty)

Constant * getAggregateElement(unsigned Elt) const

For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...

This class represents an Operation in the Expression.

A parsed version of the target data layout string and methods for querying it.

TypeSize getTypeSizeInBits(Type *Ty) const

Size examples:

bool contains(const_arg_type_t< KeyT > Val) const

Return true if the specified key is in the map, false otherwise.

std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)

static FixedVectorType * get(Type *ElementType, unsigned NumElts)

An instruction for type-safe pointer arithmetic to access elements of arrays and structs.

static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

void setIsInBounds(bool b=true)

Set or clear the inbounds flag on this GEP instruction.

Common base class shared among various IRBuilders.

Value * CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)

Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")

Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")

Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)

Value * CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy, const Twine &Name="", MDNode *FPMathTag=nullptr, FMFSource FMFSource={})

Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())

ConstantInt * getInt64(uint64_t C)

Get a constant 64-bit value.

Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")

Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")

Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)

Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)

void SetInsertPoint(BasicBlock *TheBB)

This specifies that created instructions should be appended to the end of the specified block.

Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
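
Putting a few of the IRBuilder calls above together (a minimal sketch; BB and Vec are assumed to be supplied by the caller):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

Value *extractAndCompareLane0(BasicBlock *BB, Value *Vec) {
  IRBuilder<> Builder(BB->getContext());
  Builder.SetInsertPoint(BB); // append new instructions at the end of BB
  Value *Lane0 = Builder.CreateExtractElement(Vec, Builder.getInt64(0));
  Value *Zero = Constant::getNullValue(Lane0->getType());
  return Builder.CreateICmp(CmpInst::ICMP_EQ, Lane0, Zero);
}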

This instruction inserts a single (scalar) element into a VectorType value.

static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

VectorType * getType() const

Overload to return most specific vector type.

This instruction inserts a struct field of array element value into an aggregate value.

Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)

Given an instruction with a select as one operand and a constant as the other operand,...

Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)

Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...

Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override

The specified value produces a vector with any number of elements.

Instruction * foldSelectShuffle(ShuffleVectorInst &Shuf)

Try to fold shuffles that are the equivalent of a vector select.

Instruction * visitInsertValueInst(InsertValueInst &IV)

Try to find redundant insertvalue instructions, like the following ones: %0 = insertvalue { i8,...

Instruction * visitInsertElementInst(InsertElementInst &IE)

Instruction * visitExtractElementInst(ExtractElementInst &EI)

Instruction * simplifyBinOpSplats(ShuffleVectorInst &SVI)

Instruction * foldAggregateConstructionIntoAggregateReuse(InsertValueInst &OrigIVI)

Look for chain of insertvalue's that fully define an aggregate, and trace back the values inserted,...

Instruction * visitShuffleVectorInst(ShuffleVectorInst &SVI)

Instruction * replaceInstUsesWith(Instruction &I, Value *V)

A combiner-aware RAUW-like routine.

InstructionWorklist & Worklist

A worklist of the instructions that need to be simplified.

Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)

Same as InsertNewInstBefore, but also sets the debug loc.

void addToWorklist(Instruction *I)

Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)

Replace operand of instruction and add old operand to the worklist.

static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)

Some binary operators require special handling to avoid poison and undefined behavior.
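
As a concrete illustration of the idea for a udiv (a hypothetical sketch only, assuming an integer element type; the real helper handles more opcodes and constant kinds): divisor lanes that are poison or zero would be immediate UB even when the lane's result is discarded, so a "safe" variant rewrites such lanes to 1.

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

static Constant *makeSafeDivisor(Constant *C, FixedVectorType *VTy) {
  Constant *One = ConstantInt::get(VTy->getElementType(), 1);
  SmallVector<Constant *> Elts;
  for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
    Constant *Elt = C->getAggregateElement(I);
    // Replace poison/zero lanes so no lane can divide by zero or poison.
    if (!Elt || isa<PoisonValue>(Elt) || Elt->isZeroValue())
      Elt = One;
    Elts.push_back(Elt);
  }
  return ConstantVector::get(Elts);
}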

const SimplifyQuery & getSimplifyQuery() const

void addValue(Value *V)

Add value to the worklist if it is an instruction.

bool hasNoUnsignedWrap() const LLVM_READONLY

Determine whether the no unsigned wrap flag is set.

bool hasNoSignedWrap() const LLVM_READONLY

Determine whether the no signed wrap flag is set.

void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)

Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...

const Module * getModule() const

Return the module owning the function this instruction belongs to, or nullptr if the function does not...

void andIRFlags(const Value *V)

Logical 'and' of any supported wrapping, exact, and fast-math flags of V and this instruction.

void setFastMathFlags(FastMathFlags FMF)

Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...

Instruction * user_back()

Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...

bool isExact() const LLVM_READONLY

Determine whether the exact flag is set.

unsigned getOpcode() const

Returns a member of one of the enums like Instruction::Add.

void dropPoisonGeneratingFlags()

Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.

A wrapper class for inspecting calls to intrinsic functions.

void addIncoming(Value *V, BasicBlock *BB)

Add an incoming value to the end of the PHI list.

BasicBlock * getIncomingBlock(unsigned i) const

Return incoming basic block number i.

Value * getIncomingValue(unsigned i) const

Return incoming value number i.

unsigned getNumIncomingValues() const

Return the number of incoming edges.

static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...

In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...

static PoisonValue * get(Type *T)

Static factory methods - Return a 'poison' object of the specified type.

This class represents the LLVM 'select' instruction.

static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)

This instruction constructs a fixed permutation of two input vectors.

bool changesLength() const

Return true if this shuffle returns a vector with a different number of elements than its source vect...

int getMaskValue(unsigned Elt) const

Return the shuffle mask value of this instruction for the given element index.

static bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)

Return true if this shuffle mask chooses elements from its source vectors without lane crossings.

VectorType * getType() const

Overload to return most specific vector type.

bool increasesLength() const

Return true if this shuffle returns a vector with a greater number of elements than its source vector...

bool isIdentityWithExtract() const

Return true if this shuffle extracts the first N elements of exactly one source vector.

static void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)

Convert the input shuffle mask operand to a vector of integers.

bool isSelect() const

Return true if this shuffle chooses elements from its source vectors without lane crossings and all o...

static bool isIdentityMask(ArrayRef< int > Mask, int NumSrcElts)

Return true if this shuffle mask chooses elements from exactly one source vector without lane crossin...

static void commuteShuffleMask(MutableArrayRef< int > Mask, unsigned InVecNumElts)

Change values in a shuffle permute mask assuming the two vector operands of length InVecNumElts have ...

void commute()

Swap the operands and adjust the mask to preserve the semantics of the instruction.
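
A self-contained check of the static mask helpers above, using 4-element masks (values chosen only for illustration; no IR is created):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

void maskDemo() {
  int Sel[] = {0, 5, 2, 7}; // each lane picks either LHS lane i or RHS lane i
  int Id[] = {0, 1, 2, 3};  // passes the first source through unchanged
  bool IsSel = ShuffleVectorInst::isSelectMask(Sel, /*NumSrcElts=*/4);  // true
  bool IsId = ShuffleVectorInst::isIdentityMask(Id, /*NumSrcElts=*/4);  // true
  SmallVector<int> M(Sel, Sel + 4);
  ShuffleVectorInst::commuteShuffleMask(M, /*InVecNumElts=*/4);
  // M is now {4, 1, 6, 3}: the operands are conceptually swapped and the
  // mask rewritten so the shuffle selects the same values as before.
  (void)IsSel; (void)IsId;
}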

This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...

bool all() const

Returns true if all bits are set.

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

reference emplace_back(ArgTypes &&... Args)

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

This class represents a truncation of integer types.

The instances of the Type class are immutable: once they are created, they are never changed.

unsigned getIntegerBitWidth() const

bool isVectorTy() const

True if this is an instance of VectorType.

bool isIntOrIntVectorTy() const

Return true if this is an integer type or a vector of integer types.

unsigned getStructNumElements() const

uint64_t getArrayNumElements() const

static IntegerType * getIntNTy(LLVMContext &C, unsigned N)

unsigned getScalarSizeInBits() const LLVM_READONLY

If this is a vector type, return the getPrimitiveSizeInBits value for the element type.

bool isFloatingPointTy() const

Return true if this is one of the floating-point types.

static IntegerType * getInt64Ty(LLVMContext &C)

bool isIntegerTy() const

True if this is an instance of IntegerType.

TypeID getTypeID() const

Return the type id for the type.

TypeSize getPrimitiveSizeInBits() const LLVM_READONLY

Return the basic size of this type if it is a primitive type.

static UnaryOperator * CreateWithCopiedFlags(UnaryOps Opc, Value *V, Instruction *CopyO, const Twine &Name="", InsertPosition InsertBefore=nullptr)

UnaryOps getOpcode() const

static UnaryOperator * CreateFNegFMF(Value *Op, Instruction *FMFSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)

static UndefValue * get(Type *T)

Static factory methods - Return an 'undef' object of the specified type.

A Use represents the edge between a Value definition and its users.

Value * getOperand(unsigned i) const

LLVM Value Representation.

Type * getType() const

All values are typed, get the type of this value.

const Value * DoPHITranslation(const BasicBlock *CurBB, const BasicBlock *PredBB) const

Translate PHI node to its predecessor from the given basic block.

bool hasOneUse() const

Return true if there is exactly one use of this value.

void replaceAllUsesWith(Value *V)

Change all uses of this to point to a new Value.

iterator_range< user_iterator > users()

LLVMContext & getContext() const

All values hold a context through their type.

StringRef getName() const

Return a constant reference to the value's name.

ElementCount getElementCount() const

Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...

static VectorType * get(Type *ElementType, ElementCount EC)

This static method is the primary way to construct a VectorType.

static bool isValidElementType(Type *ElemTy)

Return true if the specified type is valid as an element type.

Type * getElementType() const

constexpr bool isScalable() const

Returns whether the quantity is scaled by a runtime quantity (vscale).

constexpr ScalarTy getKnownMinValue() const

Returns the minimum value this quantity can represent.

const ParentTy * getParent() const

self_iterator getIterator()

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})

Look up the Function declaration of the intrinsic id in the Module M.

class_match< PoisonValue > m_Poison()

Match an arbitrary poison constant.

class_match< BinaryOperator > m_BinOp()

Match an arbitrary binary operation and ignore it.

class_match< Constant > m_Constant()

Match an arbitrary Constant and ignore it.

CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)

Matches Trunc.

specific_intval< false > m_SpecificInt(const APInt &V)

Match a specific integer value or vector with all elements equal to the value.

bool match(Val *V, const Pattern &P)

specificval_ty m_Specific(const Value *V)

Match if we have a specific specified value.

TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)

Matches ExtractElementInst.

class_match< ConstantInt > m_ConstantInt()

Match an arbitrary ConstantInt and ignore it.

ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)

Matches SelectInst.

cst_pred_ty< is_zero_int > m_ZeroInt()

Match an integer 0 or a vector with all elements equal to 0.

OneUse_match< T > m_OneUse(const T &SubPattern)

BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)

Matches a 'Neg' as 'sub 0, V'.

TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)

Matches ShuffleVectorInst independently of mask value.

match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()

Match an arbitrary immediate Constant and ignore it.

CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)

OneOps_match< OpTy, Instruction::Load > m_Load(const OpTy &Op)

Matches LoadInst.

CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)

Matches ZExt.

class_match< CmpInst > m_Cmp()

Matches any compare instruction and ignore it.

CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)

Matches BitCast.

class_match< UnaryOperator > m_UnOp()

Match an arbitrary unary operation and ignore it.

class_match< Value > m_Value()

Match an arbitrary value and ignore it.

BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)

FNeg_match< OpTy > m_FNeg(const OpTy &X)

Match 'fneg X' as 'fsub -0.0, X'.

auto m_Undef()

Match an arbitrary undef constant.

CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)

Matches SExt.

is_zero m_Zero()

Match any null constant or a vector with all elements equal to 0.

ThreeOps_match< Val_t, Elt_t, Idx_t, Instruction::InsertElement > m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)

Matches InsertElementInst.

m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)

match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)

Combine two pattern matchers matching L || R.
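
A sketch of how these matchers compose (illustrative only; this exact helper is not part of this file): recognize an extractelement that reads back the lane an insertelement just wrote, and return the inserted scalar directly.

#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;

Value *matchExtractOfInsertedLane(Value *V) {
  Value *Scalar;
  ConstantInt *InsIdx, *ExtIdx;
  if (match(V, m_ExtractElt(m_InsertElt(m_Value(), m_Value(Scalar),
                                        m_ConstantInt(InsIdx)),
                            m_ConstantInt(ExtIdx))) &&
      InsIdx->getZExtValue() == ExtIdx->getZExtValue())
    return Scalar; // the same lane is written and then read back
  return nullptr;
}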

bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I)

Like isSafeToSpeculativelyExecute, but does not use information from the instruction's non-constant operands, so the answer stays valid if those operands are replaced with arbitrary values.

auto enumerate(FirstRange &&First, RestRanges &&...Rest)

Given two or more input ranges, returns a new range whose values are tuples (A, B,...
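
A minimal usage sketch of enumerate(), which pairs each element with its index:

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
using namespace llvm;

void enumerateDemo() {
  SmallVector<int> Mask = {0, -1, 2};
  for (const auto &E : enumerate(Mask)) {
    // E.index() yields 0, 1, 2; E.value() yields 0, -1, 2.
    (void)E;
  }
}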

llvm::SmallVector< int, 16 > createUnaryMask(ArrayRef< int > Mask, unsigned NumElts)

Given a shuffle mask for a binary shuffle, create the equivalent shuffle mask assuming both operands ...

Value * simplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef< int > Mask, Type *RetTy, const SimplifyQuery &Q)

Given operands for a ShuffleVectorInst, fold the result or return null.

constexpr bool isPowerOf2_32(uint32_t Value)

Return true if the argument is a power of two > 0.

Value * simplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)

Given operands for an InsertValueInst, fold the result or return null.

Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)

Attempt to constant fold a binary operation with the specified operands.

constexpr int PoisonMaskElem

Value * findScalarElement(Value *V, unsigned EltNo)

Given a vector and an element number, see if the scalar value is already around as a register,...

Value * simplifyInsertElementInst(Value *Vec, Value *Elt, Value *Idx, const SimplifyQuery &Q)

Given operands for an InsertElement, fold the result or return null.

constexpr unsigned BitWidth

auto count_if(R &&Range, UnaryPredicate P)

Wrapper function around std::count_if to count the number of times an element satisfying a given pred...

auto predecessors(const MachineBasicBlock *BB)

bool is_contained(R &&Range, const E &Element)

Returns true if Element is found in Range.

bool pred_empty(const BasicBlock *BB)

bool isKnownNeverNaN(const Value *V, unsigned Depth, const SimplifyQuery &SQ)

Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...

bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)

Returns true if V cannot be poison, but may be undef.

Value * simplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &Q)

Given operands for an ExtractElementInst, fold the result or return null.

bool scaleShuffleMaskElts(unsigned NumDstElts, ArrayRef< int > Mask, SmallVectorImpl< int > &ScaledMask)

Attempt to narrow/widen the Mask shuffle mask to the NumDstElts target width.
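
A small example of rescaling (assuming the declaration from llvm/Analysis/VectorUtils.h): widening a 2-element mask to 4 destination elements splits each source lane into two adjacent narrower lanes.

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/VectorUtils.h"
using namespace llvm;

void scaleMaskDemo() {
  int Mask[] = {1, 0};
  SmallVector<int> Scaled;
  if (scaleShuffleMaskElts(/*NumDstElts=*/4, Mask, Scaled)) {
    // Scaled == {2, 3, 0, 1}: source lane 1 becomes lanes {2, 3},
    // source lane 0 becomes lanes {0, 1}.
  }
}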

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

These are the ingredients in an alternate form binary operator as described below.

BinopElts(BinaryOperator::BinaryOps Opc=(BinaryOperator::BinaryOps) 0, Value *V0=nullptr, Value *V1=nullptr)

BinaryOperator::BinaryOps Opcode

SimplifyQuery getWithInstruction(const Instruction *I) const