LLVM: lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp Source File

1//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//

2//

3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.

4// See https://llvm.org/LICENSE.txt for license information.

5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

6//

7//===----------------------------------------------------------------------===//

8//

9// This file implements the visit functions for load, store and alloca.

10//

11//===----------------------------------------------------------------------===//

12

25using namespace llvm;

26using namespace PatternMatch;

27

28#define DEBUG_TYPE "instcombine"

29

30STATISTIC(NumDeadStore, "Number of dead stores eliminated");

31STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

32

34 "instcombine-max-copied-from-constant-users", cl::init(300),

35 cl::desc("Maximum users to visit in copy from constant transform"),

37

38

39

40

41

42

43

44

45static bool
46isOnlyCopiedFromConstantMemory(AAResults *AA, AllocaInst *V,

47 MemTransferInst *&TheCopy,

48 SmallVectorImpl<Instruction *> &ToDelete) {

49

50

51

52

57 while (!Worklist.empty()) {

58 ValueAndIsOffset Elem = Worklist.pop_back_val();

59 if (!Visited.insert(Elem).second)

60 continue;

62 return false;

63

64 const auto [Value, IsOffset] = Elem;

67

69

70 if (!LI->isSimple()) return false;

71 continue;

72 }

73

75

76

77

79 continue;

80 }

82

84 continue;

85 }

87

88

89 Worklist.emplace_back(I, IsOffset || GEP->hasAllZeroIndices());

90 continue;

91 }

92

94

95

96 if (Call->isCallee(&U))

97 continue;

98

99 unsigned DataOpNo = Call->getDataOperandNo(&U);

100 bool IsArgOperand = Call->isArgOperand(&U);

101

102

103 if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))

104 return false;

105

106

107

108

109 bool NoCapture = Call->doesNotCapture(DataOpNo);

110 if (NoCapture &&

111 (Call->onlyReadsMemory() || Call->onlyReadsMemory(DataOpNo)))

112 continue;

113 }

114

115

116 if (I->isLifetimeStartOrEnd()) {

117 assert(I->use_empty() && "Lifetime markers have no result to use!");

119 continue;

120 }

121

122

123

124 MemTransferInst *MI = dyn_cast<MemTransferInst>(I);

125 if (!MI)

126 return false;

127

128

129 if (MI->isVolatile())

130 return false;

131

132

133

134 if (U.getOperandNo() == 1)

135 continue;

136

137

138 if (TheCopy) return false;

139

140

141

142 if (IsOffset) return false;

143

144

145 if (U.getOperandNo() != 0) return false;

146

147

148 if (isModSet(AA->getModRefInfoMask(MI->getSource())))

149 return false;

150

151

152 TheCopy = MI;

153 }

154 }

155 return true;

156}
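The loop above is a budgeted worklist traversal: each (value, is-offset) pair is visited at most once, and the walk gives up once the MaxCopiedFromConstantUsers budget is exhausted. A minimal self-contained sketch of the same pattern in plain C++ (the names here are illustrative, not from the file):

```cpp
#include <cstdio>
#include <set>
#include <utility>
#include <vector>

using Node = int;                          // stand-in for llvm::Value *
using NodeAndFlag = std::pair<Node, bool>; // stand-in for the PointerIntPair

// Walk every node reachable through Uses, carrying a boolean flag along each
// path, and fail once more than Budget distinct (node, flag) states have been
// visited: the same shape as the loop above.
static bool walkWithBudget(Node Root,
                           const std::vector<std::vector<Node>> &Uses,
                           unsigned Budget) {
  std::vector<NodeAndFlag> Worklist{{Root, false}};
  std::set<NodeAndFlag> Visited;
  while (!Worklist.empty()) {
    NodeAndFlag Elem = Worklist.back();
    Worklist.pop_back();
    if (!Visited.insert(Elem).second)
      continue;                 // already seen in this state
    if (Visited.size() > Budget)
      return false;             // too many users: give up, stay conservative
    auto [N, Flag] = Elem;
    for (Node U : Uses[N])      // queue the users, propagating the flag
      Worklist.emplace_back(U, Flag);
  }
  return true;
}

int main() {
  std::vector<std::vector<Node>> Uses = {{1, 2}, {2}, {}};
  std::printf("%s\n", walkWithBudget(0, Uses, 8) ? "ok" : "gave up");
}
```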

157

158

159

160

161

168 return TheCopy;

169 return nullptr;

170}

171

172

173static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,

174 const DataLayout &DL) {

175 if (AI->isArrayAllocation())

176 return false;

177 uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());

178 if (!AllocaSize)

179 return false;

180 return isDereferenceableAndAlignedPointer(V, Align(1),

181 APInt(64, AllocaSize), DL);

182}

183

184static Instruction *simplifyAllocaArraySize(InstCombinerImpl &IC,

185 AllocaInst &AI, DominatorTree &DT) {

186

188

190 return nullptr;

191

192

194 }

195

196

198 if (C->getValue().getActiveBits() <= 64) {

202 New->setAlignment(AI.getAlign());

204

207 }

208 }

209

212

213

214

215

220 }

221

222 return nullptr;

223}
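The fragment above rewrites an alloca whose array-size operand is a constant as an alloca of the corresponding array type, preserving alignment (line 202). A hedged sketch of that core rewrite, using only APIs that appear in this page's index; the helper name is invented for illustration:

```cpp
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Illustrative helper: rewrite "%p = alloca i32, i32 4" as
// "%p = alloca [4 x i32]". The real simplifyAllocaArraySize also handles
// zero-size allocas and non-canonical size types.
static AllocaInst *canonicalizeArrayAlloca(AllocaInst &AI, uint64_t Count) {
  IRBuilder<> B(&AI); // insert the replacement right before the old alloca
  Type *ArrTy = ArrayType::get(AI.getAllocatedType(), Count);
  AllocaInst *New = B.CreateAlloca(ArrTy, AI.getAddressSpace());
  New->setAlignment(AI.getAlign()); // preserve the original alignment
  New->takeName(&AI);
  return New;
}
```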

224

225namespace {

226

227

228

229

230

231

232

233

234

235

236class PointerReplacer {

237public:

238 PointerReplacer(InstCombinerImpl &IC, Instruction &Root, unsigned SrcAS)

239 : IC(IC), Root(Root), FromAS(SrcAS) {}

240

241 bool collectUsers();

242 void replacePointer(Value *V);

243

244private:

245 void replace(Instruction *I);

246 Value *getReplacement(Value *V) const { return WorkMap.lookup(V); }

248 return I == &Root || UsersToReplace.contains(I);

249 }

250

251 bool isEqualOrValidAddrSpaceCast(const Instruction *I,

252 unsigned FromAS) const {

254 if (!ASC)

255 return false;

256 unsigned ToAS = ASC->getDestAddressSpace();

257 return (FromAS == ToAS) || IC.isValidAddrSpaceCast(FromAS, ToAS);

258 }

259

260 SmallSetVector<Instruction *, 32> UsersToReplace;

261 MapVector<Value *, Value *> WorkMap;

262 InstCombinerImpl &IC;

264 unsigned FromAS;

265};

266}

267

268bool PointerReplacer::collectUsers() {

270 SmallSetVector<Instruction *, 32> ValuesToRevisit;

271

272 auto PushUsersToWorklist = [&](Instruction *Inst) {

273 for (auto *U : Inst->users())

277 };

278

279 auto TryPushInstOperand = [&](Instruction *InstOp) {

280 if (!UsersToReplace.contains(InstOp)) {

281 if (!ValuesToRevisit.insert(InstOp))

282 return false;

284 }

285 return true;

286 };

287

288 PushUsersToWorklist(&Root);

289 while (!Worklist.empty()) {

292 if (Load->isVolatile())

293 return false;

294 UsersToReplace.insert(Load);

296

297

298

299 bool IsReplaceable = true;

300 if (all_of(PHI->incoming_values(), [&](Value *V) {

301 if (!isa<Instruction>(V))

302 return IsReplaceable = false;

303 return isAvailable(cast<Instruction>(V));

304 })) {

305 UsersToReplace.insert(PHI);

306 PushUsersToWorklist(PHI);

307 continue;

308 }

309

310

311

312

313 if (!IsReplaceable || !ValuesToRevisit.insert(PHI))

314 return false;

315

316

317

319 for (unsigned Idx = 0; Idx < PHI->getNumIncomingValues(); ++Idx) {

321 return false;

322 }

326 if (!TrueInst || !FalseInst)

327 return false;

328

330 UsersToReplace.insert(SI);

331 PushUsersToWorklist(SI);

332 continue;

333 }

334

335

336

338 if (!TryPushInstOperand(TrueInst) || !TryPushInstOperand(FalseInst))

339 return false;

342 if (!PtrOp)

343 return false;

345 UsersToReplace.insert(GEP);

346 PushUsersToWorklist(GEP);

347 continue;

348 }

349

351 if (!TryPushInstOperand(PtrOp))

352 return false;

354 if (MI->isVolatile())

355 return false;

356 UsersToReplace.insert(Inst);

357 } else if (isEqualOrValidAddrSpaceCast(Inst, FromAS)) {

358 UsersToReplace.insert(Inst);

359 PushUsersToWorklist(Inst);

361 continue;

362 } else {

363

364

365 LLVM_DEBUG(dbgs() << "Cannot handle pointer user: " << *Inst << '\n');

366 return false;

367 }

368 }

369

370 return true;

371}

372

373void PointerReplacer::replacePointer(Value *V) {

375 "Invalid usage");

376 WorkMap[&Root] = V;

378 SetVector<Instruction *> PostOrderWorklist;

379 SmallPtrSet<Instruction *, 32> Visited;

380

381

383 while (!Worklist.empty()) {

385

386

387

388 if (Visited.insert(I).second) {

389 for (auto *U : I->users()) {

391 if (UsersToReplace.contains(UserInst) && !Visited.contains(UserInst))

393 }

394

395

396 } else {

397 PostOrderWorklist.insert(I);

399 }

400 }

401

402

403 for (Instruction *I : reverse(PostOrderWorklist))

405}
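replacePointer builds a post-order over the instructions to rewrite using an explicit stack: a node stays on the stack at its first visit and is emitted only when seen again, after all of its users have been queued; walking the post-order in reverse then rewrites each definition before its users. The same trick in self-contained C++ (illustrative names):

```cpp
#include <set>
#include <vector>

// Two-phase explicit-stack post-order: the first encounter pushes the users
// and leaves N in place; the second encounter emits N. LLVM's version
// collects into a SetVector, which also deduplicates; Emitted plays that
// role here.
static void postOrder(int Root, const std::vector<std::vector<int>> &Users,
                      std::vector<int> &Out) {
  std::vector<int> Stack{Root};
  std::set<int> Visited, Emitted;
  while (!Stack.empty()) {
    int N = Stack.back();
    if (Visited.insert(N).second) {
      for (int U : Users[N])    // first visit: queue unvisited users
        if (!Visited.count(U))
          Stack.push_back(U);
    } else {
      Stack.pop_back();         // second visit: all users already queued
      if (Emitted.insert(N).second)
        Out.push_back(N);       // emit N after its users
    }
  }
  // Processing reverse(Out) visits every definition before its users.
}
```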

406

407void PointerReplacer::replace(Instruction *I) {

408 if (getReplacement(I))

409 return;

410

412 auto *V = getReplacement(LT->getPointerOperand());

413 assert(V && "Operand not replaced");

414 auto *NewI = new LoadInst(LT->getType(), V, "", LT->isVolatile(),

415 LT->getAlign(), LT->getOrdering(),

416 LT->getSyncScopeID());

417 NewI->takeName(LT);

419

422

423

424

425 WorkMap[NewI] = NewI;

427

428

429 Value *V = WorkMap.lookup(PHI->getIncomingValue(0));

430 PHI->mutateType(V ? V->getType() : PHI->getIncomingValue(0)->getType());

431 for (unsigned int I = 0; I < PHI->getNumIncomingValues(); ++I) {

432 Value *V = WorkMap.lookup(PHI->getIncomingValue(I));

433 PHI->setIncomingValue(I, V ? V : PHI->getIncomingValue(I));

434 }

437 auto *V = getReplacement(GEP->getPointerOperand());

438 assert(V && "Operand not replaced");

439 SmallVector<Value *, 8> Indices(GEP->indices());

440 auto *NewI =

444 NewI->setNoWrapFlags(GEP->getNoWrapFlags());

445 WorkMap[GEP] = NewI;

447 Value *TrueValue = SI->getTrueValue();

448 Value *FalseValue = SI->getFalseValue();

449 if (Value *Replacement = getReplacement(TrueValue))

450 TrueValue = Replacement;

451 if (Value *Replacement = getReplacement(FalseValue))

452 FalseValue = Replacement;

454 SI->getName(), nullptr, SI);

457 WorkMap[SI] = NewSI;

459 auto *DestV = MemCpy->getRawDest();

460 auto *SrcV = MemCpy->getRawSource();

461

462 if (auto *DestReplace = getReplacement(DestV))

463 DestV = DestReplace;

464 if (auto *SrcReplace = getReplacement(SrcV))

465 SrcV = SrcReplace;

466

469 MemCpy->getIntrinsicID(), DestV, MemCpy->getDestAlign(), SrcV,

470 MemCpy->getSourceAlign(), MemCpy->getLength(), MemCpy->isVolatile());

472 if (AAMD)

473 NewI->setAAMetadata(AAMD);

474

476 WorkMap[MemCpy] = NewI;

478 auto *V = getReplacement(ASC->getPointerOperand());

479 assert(V && "Operand not replaced");

480 assert(isEqualOrValidAddrSpaceCast(

481 ASC, V->getType()->getPointerAddressSpace()) &&

482 "Invalid address space cast!");

483

484 if (V->getType()->getPointerAddressSpace() !=

485 ASC->getType()->getPointerAddressSpace()) {

486 auto *NewI = new AddrSpaceCastInst(V, ASC->getType(), "");

487 NewI->takeName(ASC);

489 WorkMap[ASC] = NewI;

490 } else {

491 WorkMap[ASC] = V;

492 }

493

494 } else {

496 }

497}

498

499Instruction *InstCombinerImpl::visitAllocaInst(AllocaInst &AI) {
500 if (auto *I = simplifyAllocaArraySize(*this, AI, DT))

501 return I;

502

504

505

506

507 if (DL.getTypeAllocSize(AI.getAllocatedType()).getKnownMinValue() == 0) {

508

509

510

514

515

518 if (&*FirstInst != &AI) {

519

520

521

525 .getKnownMinValue() != 0) {

527 return &AI;

528 }

529

530

531

532

536 }

537 }

538 }

539

540

541

542

543

544

545

548 Value *TheSrc = Copy->getSource();

551 TheSrc, AllocaAlign, DL, &AI, &AC, &DT);

552 if (AllocaAlign <= SourceAlign &&

555

556

557 LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');

563

566 ++NumGlobalCopies;

567 return NewI;

568 }

569

570 PointerReplacer PtrReplacer(*this, AI, SrcAddrSpace);

571 if (PtrReplacer.collectUsers()) {

574

575 PtrReplacer.replacePointer(TheSrc);

576 ++NumGlobalCopies;

577 }

578 }

579 }

580

581

582

584}

585

586

587static bool isSupportedAtomicType(Type *Ty) {
588 return Ty->isIntOrPtrTy() || Ty->isFloatingPointTy();

589}

590

591

592

593

594

595

596

597

598

599

600LoadInst *InstCombinerImpl::combineLoadToNewType(LoadInst &LI, Type *NewTy,

601 const Twine &Suffix) {

602 assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&

603 "can't fold an atomic load to requested type");

604

610 return NewLoad;

611}
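combineLoadToNewType re-issues the load at the same address with a new result type, keeping alignment, volatility and atomic ordering, and carrying metadata across. A hedged, compilable sketch of what such a helper does; the free-function form and name are illustrative, since the real helper is an InstCombinerImpl member:

```cpp
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

// Illustrative stand-in for combineLoadToNewType: same pointer, alignment,
// volatility and ordering; only the loaded type changes.
static LoadInst *retypeLoad(LoadInst &LI, Type *NewTy, const Twine &Suffix) {
  IRBuilder<> B(&LI);
  LoadInst *NewLoad =
      B.CreateAlignedLoad(NewTy, LI.getPointerOperand(), LI.getAlign(),
                          LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
  copyMetadataForLoad(*NewLoad, LI); // keeps only type-safe metadata kinds
  return NewLoad;
}
```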

612

613

614

615

619 "can't fold an atomic store of requested type");

620

621 Value *Ptr = SI.getPointerOperand();

623 SI.getAllMetadata(MD);

624

627 NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());

628 for (const auto &MDPair : MD) {

629 unsigned ID = MDPair.first;

630 MDNode *N = MDPair.second;

631

632

633

634

635

636

637

638

639 switch (ID) {

640 case LLVMContext::MD_dbg:

641 case LLVMContext::MD_DIAssignID:

642 case LLVMContext::MD_tbaa:

643 case LLVMContext::MD_prof:

644 case LLVMContext::MD_fpmath:

645 case LLVMContext::MD_tbaa_struct:

646 case LLVMContext::MD_alias_scope:

647 case LLVMContext::MD_noalias:

648 case LLVMContext::MD_nontemporal:

649 case LLVMContext::MD_mem_parallel_loop_access:

650 case LLVMContext::MD_access_group:

651

653 break;

654 case LLVMContext::MD_invariant_load:

655 case LLVMContext::MD_nonnull:

656 case LLVMContext::MD_noundef:

657 case LLVMContext::MD_range:

658 case LLVMContext::MD_align:

659 case LLVMContext::MD_dereferenceable:

660 case LLVMContext::MD_dereferenceable_or_null:

661

662 break;

663 }

664 }

665

666 return NewStore;

667}

668

669

670

671

672

673

674

675

676

677

678

679

680

681

682

683

684

685

686static Instruction *combineLoadToOperationType(InstCombinerImpl &IC,

687 LoadInst &Load) {

688

689

690 if (!Load.isUnordered())

691 return nullptr;

692

693 if (Load.use_empty())

694 return nullptr;

695

696

697 if (Load.getPointerOperand()->isSwiftError())

698 return nullptr;

699

700

701

702

703 if (Load.hasOneUse()) {

704

705

706 Type *LoadTy = Load.getType();

708 assert(!LoadTy->isX86_AMXTy() && "Load from x86_amx* should not happen!");

709 if (BC->getType()->isX86_AMXTy())

710 return nullptr;

711 }

712

714 Type *DestTy = CastUser->getDestTy();

721 return &Load;

722 }

723 }

724 }

725

726

727

728 return nullptr;

729}

730

731static Instruction *unpackLoadToAggregate(InstCombinerImpl &IC, LoadInst &LI) {

732

733

735 return nullptr;

736

738 if (T->isAggregateType())

739 return nullptr;

740

742

744

745 auto NumElements = ST->getNumElements();

746 if (NumElements == 1) {

748 ".unpack");

750

751 NewLoad->copyMetadata(LI, LLVMContext::MD_invariant_load);

754 }

755

756

757

759 auto *SL = DL.getStructLayout(ST);

760

761 if (SL->hasPadding())

762 return nullptr;

763

766 auto *IdxType = DL.getIndexType(Addr->getType());

767

769 for (unsigned i = 0; i < NumElements; i++) {

772 Name + ".elt");

774 ST->getElementType(i), Ptr,

776 Name + ".unpack");

777

779

780 L->copyMetadata(LI, LLVMContext::MD_invariant_load);

782 }

783

784 V->setName(Name);

786 }

787

789 auto *ET = AT->getElementType();

790 auto NumElements = AT->getNumElements();

791 if (NumElements == 1) {

796 }

797

798

799

800

801

803 return nullptr;

804

806 TypeSize EltSize = DL.getTypeAllocSize(ET);

808

811 auto *Zero = ConstantInt::get(IdxType, 0);

812

815 for (uint64_t i = 0; i < NumElements; i++) {

816 Value *Indices[2] = {

817 Zero,

818 ConstantInt::get(IdxType, i),

819 };

821 Name + ".elt");

824 EltAlign, Name + ".unpack");

828 }

829

830 V->setName(Name);

832 }

833

834 return nullptr;

835}
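For a padding-free struct, the unpack path above emits one load per field at the field's offset-adjusted alignment and rebuilds the aggregate with insertvalue. A hedged sketch of that inner loop (illustrative free function; the real code also threads names and metadata through):

```cpp
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Illustrative: load each field of a (padding-free) struct separately and
// reassemble the aggregate value, as the loop above does.
static Value *unpackStructLoad(IRBuilder<> &B, StructType *ST, Value *Addr,
                               Align BaseAlign, const StructLayout *SL) {
  Value *Agg = PoisonValue::get(ST);
  for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
    Value *Ptr = B.CreateStructGEP(ST, Addr, i, "elt");
    // Field alignment = base alignment adjusted by the field's offset.
    Align EltAlign = commonAlignment(BaseAlign, SL->getElementOffset(i));
    Value *L =
        B.CreateAlignedLoad(ST->getElementType(i), Ptr, EltAlign, "unpack");
    Agg = B.CreateInsertValue(Agg, L, i);
  }
  return Agg;
}
```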

836

837

838

839

840

841

842

843static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,

844 const DataLayout &DL) {

845 SmallPtrSet<Value *, 4> Visited;

846 SmallVector<Value *, 4> Worklist(1, V);

847

848 do {

850 P = P->stripPointerCasts();

851

852 if (!Visited.insert(P).second)

853 continue;

854

858 continue;

859 }

860

862 append_range(Worklist, PN->incoming_values());

863 continue;

864 }

865

867 if (GA->isInterposable())

868 return false;

869 Worklist.push_back(GA->getAliasee());

870 continue;

871 }

872

873

874

876 if (!AI->getAllocatedType()->isSized())

877 return false;

878

880 if (!CS)

881 return false;

882

883 TypeSize TS = DL.getTypeAllocSize(AI->getAllocatedType());

885 return false;

886

887

889 .ugt(MaxSize))

890 return false;

891 continue;

892 }

893

895 if (!GV->hasDefinitiveInitializer() || !GV->isConstant())

896 return false;

897

898 uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());

899 if (InitSize > MaxSize)

900 return false;

901 continue;

902 }

903

904 return false;

905 } while (!Worklist.empty());

906

907 return true;

908}

909

910

911

912

913

914

915

916

917

918

919

920

921

922

923

924

925static bool canReplaceGEPIdxWithZero(InstCombinerImpl &IC,

926 GetElementPtrInst *GEPI, Instruction *MemI,

927 unsigned &Idx) {

929 return false;

930

931

932

934 unsigned I = 1;

938 if (CI->isZero())

939 continue;

940

941 break;

942 }

943

944 return I;

945 };

946

947

948

949 Idx = FirstNZIdx(GEPI);

951 return false;

953 return false;

954

957

958

960 return false;

961

963 if (!AllocTy || !AllocTy->isSized())

964 return false;

966 uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy).getFixedValue();

967

968

969

970

971

972 auto IsAllNonNegative = [&]() {

973 for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {

976 continue;

977 return false;

978 }

979

980 return true;

981 };

982

983

984

985

986

987

989 return false;

990

991

992

994 IsAllNonNegative();

995}

996

997

998

999

1000static Instruction *replaceGEPIdxWithZero(InstCombinerImpl &IC, Value *Ptr,

1001 Instruction &MemI) {

1002 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {

1003 unsigned Idx;

1007 ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));

1009 return NewGEPI;

1010 }

1011 }

1012

1013 return nullptr;

1014}

1015

1016static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
1017 if (NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()))

1018 return false;

1019

1020 auto *Ptr = SI.getPointerOperand();

1022 Ptr = GEPI->getOperand(0);

1025}

1026

1027static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
1028 if (auto *GEPI = dyn_cast<GetElementPtrInst>(Op)) {

1029 const Value *GEPI0 = GEPI->getOperand(0);

1032 return true;

1033 }

1037 return true;

1038 return false;

1039}

1040

1041Value *InstCombinerImpl::simplifyNonNullOperand(Value *V,

1042 bool HasDereferenceable,

1043 unsigned Depth) {

1044 if (auto *Sel = dyn_cast<SelectInst>(V)) {

1045 if (isa<ConstantPointerNull>(Sel->getOperand(1)))

1046 return Sel->getOperand(2);

1047

1048 if (isa<ConstantPointerNull>(Sel->getOperand(2)))

1049 return Sel->getOperand(1);

1050 }

1051

1052 if (!V->hasOneUse())

1053 return nullptr;

1054

1057 return nullptr;

1058

1060 if (HasDereferenceable || GEP->isInBounds()) {

1061 if (auto *Res = simplifyNonNullOperand(GEP->getPointerOperand(),

1062 HasDereferenceable, Depth + 1)) {

1063 replaceOperand(*GEP, 0, Res);

1065 return nullptr;

1066 }

1067 }

1068 }

1069

1072 for (Use &U : PHI->incoming_values()) {

1073

1074 if (auto *Res = simplifyNonNullOperand(U.get(), HasDereferenceable,

1076 replaceUse(U, Res);

1078 }

1079 }

1082 return nullptr;

1083 }

1084

1085 return nullptr;

1086}

1087

1088Instruction *InstCombinerImpl::visitLoadInst(LoadInst &LI) {
1089 Value *Op = LI.getOperand(0);

1092

1093

1095 return Res;

1096

1097

1100

1102 return Res;

1103

1104

1105

1106

1107 bool IsLoadCSE = false;

1110 if (IsLoadCSE)

1112

1114 LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),

1115 LI.getName() + ".cast"));

1116 }

1117

1118

1119

1121

1122

1123

1124

1128 }

1129

1130 if (Op->hasOneUse()) {

1131

1132

1133

1134

1135

1136

1137

1138

1139

1140

1142

1145 Alignment, DL, SI) &&

1147 Alignment, DL, SI)) {

1150 SI->getOperand(1)->getName() + ".val");

1153 SI->getOperand(2)->getName() + ".val");

1159

1160

1164 }

1165 }

1166 }

1167

1169 if (Value *V = simplifyNonNullOperand(Op, true))

1171

1172 return nullptr;

1173}

1174

1175

1176

1177

1178

1179

1180

1181

1182

1183

1184

1185

1186

1187

1188

1189static Value *likeBitCastFromVector(InstCombinerImpl &IC, Value *V) {
1190 Value *U = nullptr;

1191 while (auto *IV = dyn_cast<InsertValueInst>(V)) {

1192 auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());

1193 if (!E)

1194 return nullptr;

1195 auto *W = E->getVectorOperand();

1196 if (!U)

1197 U = W;

1198 else if (U != W)

1199 return nullptr;

1200 auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());

1201 if (!CI || IV->getNumIndices() != 1 || CI->getZExtValue() != *IV->idx_begin())

1202 return nullptr;

1203 V = IV->getAggregateOperand();

1204 }

1206 return nullptr;

1207

1209 auto *VT = V->getType();

1210

1212 if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {

1213 return nullptr;

1214 }

1217 return nullptr;

1218 } else {

1221 return nullptr;

1222 for (const auto *EltT : ST->elements()) {

1223 if (EltT != UT->getElementType())

1224 return nullptr;

1225 }

1226 }

1227 return U;

1228}
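For concreteness, the shape this matcher walks, written out as the IR it corresponds to (an illustrative example, not taken from the file):

```cpp
// The pattern likeBitCastFromVector recognizes, as IR (illustrative):
//
//   %e0 = extractelement <2 x float> %vec, i32 0
//   %a0 = insertvalue [2 x float] poison, float %e0, 0
//   %e1 = extractelement <2 x float> %vec, i32 1
//   %a1 = insertvalue [2 x float] %a0, float %e1, 1
//
// Every insertvalue index matches the extractelement lane and every extract
// reads the same vector %vec, so the aggregate %a1 holds exactly the bytes
// of %vec. The function returns %vec ("U"), and the caller can store it
// directly in place of the aggregate once the store sizes match.
```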

1229

1230

1231

1232

1233

1234

1235

1236

1237

1238

1239

1240

1241

1242

1243

1244

1245

1246

1247

1248

1249

1250static bool combineStoreToValueType(InstCombinerImpl &IC, StoreInst &SI) {
1251

1252

1253 if (!SI.isUnordered())

1254 return false;

1255

1256

1257 if (SI.getPointerOperand()->isSwiftError())

1258 return false;

1259

1260 Value *V = SI.getValueOperand();

1261

1262

1264 assert(!BC->getType()->isX86_AMXTy() &&

1265 "store to x86_amx* should not happen!");

1266 V = BC->getOperand(0);

1267

1268

1269 if (V->getType()->isX86_AMXTy())

1270 return false;

1273 return true;

1274 }

1275 }

1276

1280 return true;

1281 }

1282

1283

1284

1285 return false;

1286}

1287

1288static bool unpackStoreToAggregate(InstCombinerImpl &IC, StoreInst &SI) {
1289

1290

1291 if (!SI.isSimple())

1292 return false;

1293

1294 Value *V = SI.getValueOperand();

1295 Type *T = V->getType();

1296

1297 if (T->isAggregateType())

1298 return false;

1299

1301

1302 unsigned Count = ST->getNumElements();

1303 if (Count == 1) {

1306 return true;

1307 }

1308

1309

1310

1312 auto *SL = DL.getStructLayout(ST);

1313

1314 if (SL->hasPadding())

1315 return false;

1316

1317 const auto Align = SI.getAlign();

1318

1320 EltName += ".elt";

1321 auto *Addr = SI.getPointerOperand();

1323 AddrName += ".repack";

1324

1325 auto *IdxType = DL.getIndexType(Addr->getType());

1326 for (unsigned i = 0; i < Count; i++) {

1329 AddrName);

1331 auto EltAlign =

1335 }

1336

1337 return true;

1338 }

1339

1341

1342 auto NumElements = AT->getNumElements();

1343 if (NumElements == 1) {

1346 return true;

1347 }

1348

1349

1350

1351

1352

1354 return false;

1355

1357 TypeSize EltSize = DL.getTypeAllocSize(AT->getElementType());

1358 const auto Align = SI.getAlign();

1359

1361 EltName += ".elt";

1362 auto *Addr = SI.getPointerOperand();

1364 AddrName += ".repack";

1365

1367 auto *Zero = ConstantInt::get(IdxType, 0);

1368

1370 for (uint64_t i = 0; i < NumElements; i++) {

1371 Value *Indices[2] = {

1372 Zero,

1373 ConstantInt::get(IdxType, i),

1374 };

1375 auto *Ptr =

1382 }

1383

1384 return true;

1385 }

1386

1387 return false;

1388}

1389

1390

1391

1392

1393

1394

1395

1396

1397

1398static bool equivalentAddressValues(Value *A, Value *B) {
1399

1400 if (A == B) return true;

1401

1402

1403

1404

1405

1406

1413 return true;

1414

1415

1416 return false;

1417}

1418

1419Instruction *InstCombinerImpl::visitStoreInst(StoreInst &SI) {
1420 Value *Val = SI.getOperand(0);

1421 Value *Ptr = SI.getOperand(1);

1422

1423

1426

1427

1430

1431

1434

1435

1436

1437 if (!SI.isUnordered()) return nullptr;

1438

1439

1440

1446 if (GEP->getOperand(0)->hasOneUse())

1448 }

1449 }

1450 }

1451

1452

1453

1454

1455 if (isModSet(AA->getModRefInfoMask(Ptr)))

1457

1458

1459

1460

1462 for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;

1463 --ScanInsts) {

1464 --BBI;

1465

1466

1467 if (BBI->isDebugOrPseudoInst()) {

1468 ScanInsts++;

1469 continue;

1470 }

1471

1473

1474 if (PrevSI->isUnordered() &&

1476 PrevSI->getValueOperand()->getType() ==

1477 SI.getValueOperand()->getType()) {

1478 ++NumDeadStore;

1479

1480

1481

1484 return nullptr;

1485 }

1486 break;

1487 }

1488

1489

1490

1491

1494 assert(SI.isUnordered() && "can't eliminate ordering operation");

1496 }

1497

1498

1499

1500 break;

1501 }

1502

1503

1504 if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())

1505 break;

1506 }

1507

1508

1509

1513 return nullptr;

1514 }

1515

1516

1518

1520

1521

1522

1526 return nullptr;

1527 }

1528

1529

1530

1531

1534

1536 if (Value *V = simplifyNonNullOperand(Ptr, true))

1538

1539 return nullptr;

1540}
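The backwards scan over the preceding instructions (lines 1462-1505) is a tiny local dead-store elimination; in source terms (illustrative):

```cpp
// Illustrative source-level view of the local dead-store scan above:
void example(int *P) {
  *P = 1; // dead: the next store overwrites it, and nothing in between
          // reads memory, writes memory, or may throw
  *P = 2; // survives; the removal is counted by ++NumDeadStore
}
```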

1541

1542

1543

1544

1545

1546

1547bool InstCombinerImpl::mergeStoreIntoSuccessor(StoreInst &SI) {
1548 if (!SI.isUnordered())

1549 return false;

1550

1551

1555 return false;

1556

1557

1559 if (*PredIter == StoreBB)

1560 ++PredIter;

1562

1563

1564

1565 if (StoreBB == DestBB || OtherBB == DestBB)

1566 return false;

1567

1568

1571 if (!OtherBr || BBI == OtherBB->begin())

1572 return false;

1573

1574 auto OtherStoreIsMergeable = [&](StoreInst *OtherStore) -> bool {

1575 if (!OtherStore ||

1576 OtherStore->getPointerOperand() != SI.getPointerOperand())

1577 return false;

1578

1579 auto *SIVTy = SI.getValueOperand()->getType();

1580 auto *OSVTy = OtherStore->getValueOperand()->getType();

1582 SI.hasSameSpecialState(OtherStore);

1583 };

1584

1585

1586

1587 StoreInst *OtherStore = nullptr;

1589 --BBI;

1590

1591 while (BBI->isDebugOrPseudoInst()) {

1592 if (BBI==OtherBB->begin())

1593 return false;

1594 --BBI;

1595 }

1596

1597

1599 if (!OtherStoreIsMergeable(OtherStore))

1600 return false;

1601 } else {

1602

1603

1606 return false;

1607

1608

1609

1610

1611 for (;; --BBI) {

1612

1614 if (OtherStoreIsMergeable(OtherStore))

1615 break;

1616

1617

1618

1619 if (BBI->mayReadFromMemory() || BBI->mayThrow() ||

1620 BBI->mayWriteToMemory() || BBI == OtherBB->begin())

1621 return false;

1622 }

1623

1624

1625

1627

1628 if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())

1629 return false;

1630 }

1631 }

1632

1633

1635

1638 if (MergedVal != SI.getValueOperand()) {

1640 PHINode::Create(SI.getValueOperand()->getType(), 2, "storemerge");

1642 Builder.SetInsertPoint(OtherStore);

1644 OtherBB);

1647 }

1648

1649

1652 new StoreInst(MergedVal, SI.getOperand(1), SI.isVolatile(), SI.getAlign(),

1653 SI.getOrdering(), SI.getSyncScopeID());

1657

1658

1660 if (AATags)

1662

1663

1666 return true;

1667}
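The two target shapes, spelled out at the source level (illustrative; the transform itself runs on the IR CFG, as described in the index entry below):

```cpp
// The two shapes mergeStoreIntoSuccessor merges: both end up as a single
// store of a phi ("storemerge") in the join block.
void ifThenElse(bool C, int *P, int V1, int V2) {
  if (C)
    *P = V1; // store in the "then" block
  else
    *P = V2; // store in the "else" block
  // => one store of phi(V1, V2) here
}

void unconditionalThenConditional(bool C, int *P, int V1, int V2) {
  *P = V1;   // unconditional store
  if (C)
    *P = V2; // conditional overwrite
  // => likewise becomes a single store of phi(V1, V2) in the successor
}
```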

Cross-reference: definitions in this file (cleaned from the page's index):

static cl::opt<unsigned> MaxCopiedFromConstantUsers("instcombine-max-copied-from-constant-users", cl::init(300), cl::desc("Maximum users to visit in copy from constant transform"), cl::Hidden) (line 33)

static bool isOnlyCopiedFromConstantMemory(AAResults *AA, AllocaInst *V, MemTransferInst *&TheCopy, SmallVectorImpl<Instruction *> &ToDelete) (line 46)
Recursively walk the uses of a (derived) pointer to an alloca.

static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI, const DataLayout &DL) (line 173)
Returns true if V is dereferenceable for size of alloca.

static Instruction *simplifyAllocaArraySize(InstCombinerImpl &IC, AllocaInst &AI, DominatorTree &DT) (line 184)

Instruction *InstCombinerImpl::visitAllocaInst(AllocaInst &AI) (line 499)

static bool isSupportedAtomicType(Type *Ty) (line 587)

LoadInst *InstCombinerImpl::combineLoadToNewType(LoadInst &LI, Type *NewTy, const Twine &Suffix = "") (line 600)
Helper to combine a load to a new type.

static StoreInst *combineStoreToNewValue(InstCombinerImpl &IC, StoreInst &SI, Value *V) (line 616)
Combine a store to a new type.

static Instruction *combineLoadToOperationType(InstCombinerImpl &IC, LoadInst &Load) (line 686)
Combine loads to match the type of their uses' value after looking through intervening bitcasts.

static Instruction *unpackLoadToAggregate(InstCombinerImpl &IC, LoadInst &LI) (line 731)

static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize, const DataLayout &DL) (line 843)

static bool canReplaceGEPIdxWithZero(InstCombinerImpl &IC, GetElementPtrInst *GEPI, Instruction *MemI, unsigned &Idx) (line 925)

static Instruction *replaceGEPIdxWithZero(InstCombinerImpl &IC, Value *Ptr, Instruction &MemI) (line 1000)

static bool canSimplifyNullStoreOrGEP(StoreInst &SI) (line 1016)

static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) (line 1027)

Instruction *InstCombinerImpl::visitLoadInst(LoadInst &LI) (line 1088)

static Value *likeBitCastFromVector(InstCombinerImpl &IC, Value *V) (line 1189)
Look for extractelement/insertvalue sequence that acts like a bitcast.

static bool combineStoreToValueType(InstCombinerImpl &IC, StoreInst &SI) (line 1250)
Combine stores to match the type of value being stored.

static bool unpackStoreToAggregate(InstCombinerImpl &IC, StoreInst &SI) (line 1288)

static bool equivalentAddressValues(Value *A, Value *B) (line 1398)
Test if A and B will obviously have the same value.

Instruction *InstCombinerImpl::visitStoreInst(StoreInst &SI) (line 1419)

bool InstCombinerImpl::mergeStoreIntoSuccessor(StoreInst &SI) (line 1547)
Try to transform: if () { *P = v1; } else { *P = v2 } or: *P = v1; if () { *P = v2; }