LLVM: lib/IR/Instructions.cpp Source File


47#include

48#include

49#include

50#include

51#include

52

53using namespace llvm;

54

56 "disable-i2p-p2i-opt", cl::init(false),

57 cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));


63std::optional

68 if (!C)

69 return std::nullopt;

70 assert(!Size.isScalable() && "Array elements cannot have a scalable size");

71 auto CheckedProd =

73 if (!CheckedProd)

74 return std::nullopt;

76 }

78}

79

80std::optional

84 return std::nullopt;

87 if (!CheckedProd)

88 return std::nullopt;

90}


100 return "both values to select must have same type";

101

103 return "select values cannot have token type";

104

106

108 return "vector select condition element type must be i1";

110 if (!ET)

111 return "selected values for vector select must be vectors";

113 return "vector select requires selected vectors to have "

114 "the same vector length as select condition";

116 return "select condition must be i1 or ";

117 }

118 return nullptr;

119}
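// Usage sketch (illustrative, not part of this file; assuming the routine
// above is SelectInst::areInvalidOperands): callers run this check before
// constructing a select instruction.
//   if (const char *Err = SelectInst::areInvalidOperands(Cond, TVal, FVal))
//     report_fatal_error(Twine(Err));
//   Value *Sel = SelectInst::Create(Cond, TVal, FVal, "sel", InsertPt);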


125PHINode::PHINode(const PHINode &PN)

127 ReservedSpace(PN.getNumOperands()) {

133}


149 Op<-1>().set(nullptr);

151

152

154

157 }

158 return Removed;

159}

160

162 bool DeletePHIIfEmpty) {

165 if (Predicate(Idx))

167

168 if (RemoveIndices.empty())

169 return;

170

171

173 return RemoveIndices.contains(U.getOperandNo());

174 });

176 U.set(nullptr);

177

178

182 });

183

185

186

188

191 }

192}


198void PHINode::growOperands() {

200 unsigned NumOps = e + e / 2;

201 if (NumOps < 2) NumOps = 2;

202

203 ReservedSpace = NumOps;

205}

206

207

208

210

214 if (ConstantValue != this)

215 return nullptr;

216

218 }

219 if (ConstantValue == this)

221 return ConstantValue;

222}
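// Usage sketch (illustrative; assuming the routine above is
// PHINode::hasConstantValue): a PHI whose incoming values all agree can be
// folded away by its callers.
//   if (Value *V = PN->hasConstantValue()) {
//     PN->replaceAllUsesWith(V);
//     PN->eraseFromParent();
//   }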


230 Value *ConstantValue = nullptr;

234 if (ConstantValue && ConstantValue != Incoming)

235 return false;

237 }

238 }

239 return true;

240}


246LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,

247 const Twine &NameStr,

250 init(NumReservedValues, NameStr);

251}

252

253LandingPadInst::LandingPadInst(const LandingPadInst &LP)

255 ReservedSpace(LP.getNumOperands()) {

260 for (unsigned I = 0, E = ReservedSpace; I != E; ++I)

261 OL[I] = InOL[I];

262

264}

265

267 const Twine &NameStr,

269 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);

270}

271

272void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {

273 ReservedSpace = NumReservedValues;

278}

279

280

281

282void LandingPadInst::growOperands(unsigned Size) {

284 if (ReservedSpace >= e + Size) return;

285 ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;

287}

288

291 growOperands(1);

292 assert(OpNo < ReservedSpace && "Growing didn't work!");

295}


304 case Instruction::Call:
305 return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
306 case Instruction::Invoke:
307 return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
308 case Instruction::CallBr:
309 return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
310 default:
311 llvm_unreachable("Unknown CallBase sub-class!");
312 }
313}

314

320 if (ChildOB.getTagName() != OpB.getTag())

322 }

325}

326

328

330 assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");

331 return cast<CallBrInst>(this)->getNumIndirectDests() + 1;

332}

333

336 if (isa<Function>(V) || isa<GlobalAlias>(V))

337 return false;

339}

340

341

342

344 if (auto *CI = dyn_cast<CallInst>(this))

345 return CI->isMustTailCall();

346 return false;

347}

348

349

351 if (auto *CI = dyn_cast<CallInst>(this))

352 return CI->isTailCall();

353 return false;

354}

355

358 return F->getIntrinsicID();

360}

361

364

366 Mask |= F->getAttributes().getRetNoFPClass();

367 return Mask;

368}

369

372

374 Mask |= F->getAttributes().getParamNoFPClass(i);

375 return Mask;

376}

377

382 return std::nullopt;

383}

384

387 return true;

388

391 return true;

392

393 return false;

394}

395

397 unsigned Index;

398

402 if (F->getAttributes().hasAttrSomewhere(Kind, &Index))

404

405 return nullptr;

406}

407

408

410 assert(ArgNo < arg_size() && "Param index out of bounds!");

411

413 return true;

414

416 if (!F)

417 return false;

418

419 if (!F->getAttributes().hasParamAttr(ArgNo, Kind))

420 return false;

421

422

423 switch (Kind) {

424 case Attribute::ReadNone:

426 case Attribute::ReadOnly:

428 case Attribute::WriteOnly:

430 default:

431 return true;

432 }

433}

434

435bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
436 if (auto *F = dyn_cast<Function>(getCalledOperand()))
437 return F->getAttributes().hasFnAttr(Kind);

438

439 return false;

440}

441

442bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
443 if (auto *F = dyn_cast<Function>(getCalledOperand()))
444 return F->getAttributes().hasFnAttr(Kind);

445

446 return false;

447}

448

449template <typename AK>

450Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {

451 if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {

452

453

454 assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");

455 }

456

458 return F->getAttributes().getFnAttr(Kind);

459

461}

462

465template Attribute CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;

466

467template <typename AK>

468Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,

469 AK Kind) const {

471

472 if (auto *F = dyn_cast<Function>(V))

473 return F->getAttributes().getParamAttr(ArgNo, Kind);

474

476}

478CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,

480template Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,

482

487}

488

491 const unsigned BeginIndex) {

492 auto It = op_begin() + BeginIndex;

493 for (auto &B : Bundles)

494 It = std::copy(B.input_begin(), B.input_end(), It);

495

497 auto BI = Bundles.begin();

498 unsigned CurrentIndex = BeginIndex;

499

501 assert(BI != Bundles.end() && "Incorrect allocation?");

502

503 BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());

504 BOI.Begin = CurrentIndex;

505 BOI.End = CurrentIndex + BI->input_size();

506 CurrentIndex = BOI.End;

507 BI++;

508 }

509

510 assert(BI == Bundles.end() && "Incorrect allocation?");

511

512 return It;

513}

514

516

517

518

521 if (BOI.Begin <= OpIdx && OpIdx < BOI.End)

522 return BOI;

523

524 llvm_unreachable("Did not find operand bundle for operand!");

525 }

526

527 assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");

530 "The Idx isn't in the operand bundle");

531

532

533

534 constexpr unsigned NumberScaling = 1024;

535

539

540 while (Begin != End) {

541 unsigned ScaledOperandPerBundle =

542 NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);

543 Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /

544 ScaledOperandPerBundle);

545 if (Current >= End)

546 Current = std::prev(End);

547 assert(Current < End && Current >= Begin &&

548 "the operand bundle doesn't cover every value in the range");

549 if (OpIdx >= Current->Begin && OpIdx < Current->End)

550 break;

551 if (OpIdx >= Current->End)

552 Begin = Current + 1;

553 else

554 End = Current;

555 }

556

557 assert(OpIdx >= Current->Begin && OpIdx < Current->End &&

558 "the operand bundle doesn't cover every value in the range");

559 return *Current;

560}

561

566 return CB;

567

571 return Create(CB, Bundles, InsertPt);

572}
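// Usage sketch (illustrative; assuming the routine above is
// CallBase::addOperandBundle): adding a bundle allocates a new call site, so
// users of the old one must be redirected. Names below are hypothetical.
//   OperandBundleDef DeoptBundle("deopt", DeoptArgs); // DeoptArgs: ArrayRef<Value *>
//   CallBase *NewCB = CallBase::addOperandBundle(CB, LLVMContext::OB_deopt,
//                                                DeoptBundle, CB->getIterator());
//   CB->replaceAllUsesWith(NewCB);
//   CB->eraseFromParent();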

573

577 bool CreateNew = false;

578

581 if (Bundle.getTagID() == ID) {

582 CreateNew = true;

583 continue;

584 }

586 }

587

588 return CreateNew ? Create(CB, Bundles, InsertPt) : CB;

589}

590

592

593

594

598}

599

605}

606

612

617 }

618 ME &= FnME;

619 }

620 return ME;

621}

624}

625

626

629}

632}

633

634

637}

640}

641

642

645}

648}

649

650

651

654}

657}

658

659

660

663}

666}

667

668

669

672}

676}

677

678

679

680

681

684 this->FTy = FTy;

686 "NumOperands not set up?");

687

688#ifndef NDEBUG

691 "Calling a function with bad signature!");

692

693 for (unsigned i = 0; i != Args.size(); ++i)

696 "Calling a function with a bad signature!");

697#endif

698

699

700

703

705 (void)It;

706 assert(It + 1 == op_end() && "Should add up!");

707

709}

710

712 this->FTy = FTy;

715

717

719}

720

724 InsertBefore) {

726}

727

731 "Wrong number of operands allocated");

734

739}

740

744

746 Args, OpB, CI->getName(), InsertPt);

752 return NewCI;

753}

754

755

756

757

759 if (T == 0) {

760 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "

761 "div by 0. Ignoring. Likely the function "

763 << " has 0 entry count, and contains call instructions "

764 "with non-zero prof info.");

765 return;

766 }

768}

769

770

771

772

773

777 const Twine &NameStr) {

778 this->FTy = FTy;

779

782 "NumOperands not set up?");

783

784#ifndef NDEBUG

787 "Invoking a function with bad signature");

788

789 for (unsigned i = 0, e = Args.size(); i != e; i++)

792 "Invoking a function with a bad signature!");

793#endif

794

795

796

801

803 (void)It;

804 assert(It + 3 == op_end() && "Should add up!");

805

807}

808

812 "Wrong number of operands allocated");

814 std::copy(II.op_begin(), II.op_end(), op_begin());

815 std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),

818}

819

822 std::vector<Value *> Args(II->arg_begin(), II->arg_end());

823

825 II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),

826 II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);

827 NewII->setCallingConv(II->getCallingConv());

828 NewII->SubclassOptionalData = II->SubclassOptionalData;

829 NewII->setAttributes(II->getAttributes());

830 NewII->setDebugLoc(II->getDebugLoc());

831 return NewII;

832}

833

835 return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());

836}

837

839 if (T == 0) {

840 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "

841 "div by 0. Ignoring. Likely the function "

843 << " has 0 entry count, and contains call instructions "

844 "with non-zero prof info.");

845 return;

846 }

848}

849

850

851

852

853

858 const Twine &NameStr) {

859 this->FTy = FTy;

860

862 IndirectDests.size(),

864 "NumOperands not set up?");

865

866#ifndef NDEBUG

869 "Calling a function with bad signature");

870

871 for (unsigned i = 0, e = Args.size(); i != e; i++)

874 "Calling a function with a bad signature!");

875#endif

876

877

878

879 std::copy(Args.begin(), Args.end(), op_begin());

880 NumIndirectDests = IndirectDests.size();

882 for (unsigned i = 0; i != NumIndirectDests; ++i)

885

887 (void)It;

888 assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");

889

891}

892

897 "Wrong number of operands allocated");

903 NumIndirectDests = CBI.NumIndirectDests;

904}

905

909

917 NewCBI->NumIndirectDests = CBI->NumIndirectDests;

918 return NewCBI;

919}

920

921

922

923

924

929 "Wrong number of operands allocated");

933}

934

938 InsertBefore) {

939 if (retVal)

940 Op<0>() = retVal;

941}

942

943

944

945

946

947ResumeInst::ResumeInst(const ResumeInst &RI)

949 AllocMarker) {

951}

952

955 AllocMarker, InsertBefore) {

957}

958

959

960

961

962

963CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI,

967 "Wrong number of operands allocated");

968 setSubclassDataInstruction::OpaqueField(

973}

974

975void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {

976 if (UnwindBB)

977 setSubclassData(true);

978

979 Op<0>() = CleanupPad;

980 if (UnwindBB)

981 Op<1>() = UnwindBB;

982}

983

984CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,

989 init(CleanupPad, UnwindBB);

990}

991

992

993

994

995void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {

996 Op<0>() = CatchPad;

998}

999

1000CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)

1002 AllocMarker) {

1005}

1006

1007CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,

1010 AllocMarker, InsertBefore) {

1011 init(CatchPad, BB);

1012}


1018CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,

1019 unsigned NumReservedValues,

1020 const Twine &NameStr,

1023 InsertBefore) {

1024 if (UnwindDest)

1025 ++NumReservedValues;

1026 init(ParentPad, UnwindDest, NumReservedValues + 1);

1028}

1029

1030CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)

1037 for (unsigned I = 1, E = ReservedSpace; I != E; ++I)

1038 OL[I] = InOL[I];

1039}

1040

1041void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,

1042 unsigned NumReservedValues) {

1043 assert(ParentPad && NumReservedValues);

1044

1045 ReservedSpace = NumReservedValues;

1048

1049 Op<0>() = ParentPad;

1050 if (UnwindDest) {

1051 setSubclassData(true);

1053 }

1054}

1055

1056

1057

1058void CatchSwitchInst::growOperands(unsigned Size) {

1060 assert(NumOperands >= 1);

1061 if (ReservedSpace >= NumOperands + Size)

1062 return;

1063 ReservedSpace = (NumOperands + Size / 2) * 2;

1065}

1066

1069 growOperands(1);

1070 assert(OpNo < ReservedSpace && "Growing didn't work!");

1073}

1074

1076

1078 for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)

1079 *CurDst = *(CurDst + 1);

1080

1081 *EndDst = nullptr;

1082

1084}

1085

1086

1087

1088

1090 const Twine &NameStr) {

1095}

1096

1100 "Wrong number of operands allocated");

1103}

1104

1107 const Twine &NameStr,

1110 init(ParentPad, Args, NameStr);

1111}

1112

1113

1114

1115

1116

1120 AllocMarker, InsertBefore) {}


1126void BranchInst::AssertOK() {

1129 "May only branch on boolean predicates!");

1130}

1131

1136 assert(IfTrue && "Branch destination may not be null!");

1137 Op<-1>() = IfTrue;

1138}

1139

1144

1146 Op<-2>() = IfFalse;

1147 Op<-1>() = IfTrue;

1148#ifndef NDEBUG

1149 AssertOK();

1150#endif

1151}

1152

1157 "Wrong number of operands allocated");

1158

1161 Op<-3>() = BI.Op<-3>();

1162 Op<-2>() = BI.Op<-2>();

1163 }

1164 Op<-1>() = BI.Op<-1>();

1166}

1167

1170 "Cannot swap successors of an unconditional branch");

1172

1173

1174

1176}

1177

1178

1179

1180

1181

1183 if (!Amt)

1185 else {

1186 assert(!isa<BasicBlock>(Amt) &&

1187 "Passed basic block into allocation size parameter! Use other ctor");

1189 "Allocation array size is not an integer!");

1190 }

1191 return Amt;

1192}
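// Usage sketch (illustrative): getAISize above canonicalizes a null array-size
// operand to a constant 1, so every alloca carries an element count.
//   AllocaInst *AI =
//       new AllocaInst(EltTy, /*AddrSpace=*/0, NumElts, "buf", InsertPt);
//   bool IsArray = AI->isArrayAllocation(); // true unless the count is constant 1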

1193

1196 "Insertion position cannot be null when alignment not provided!");

1199 "BB must be in a Function when alignment not provided!");

1201 return DL.getPrefTypeAlign(Ty);

1202}

1203

1206 : AllocaInst(Ty, AddrSpace, nullptr, Name, InsertBefore) {}

1207

1210 : AllocaInst(Ty, AddrSpace, ArraySize,

1212 InsertBefore) {}

1213

1218 getAISize(Ty->getContext(), ArraySize), InsertBefore),

1219 AllocatedType(Ty) {

1223}

1224

1225bool AllocaInst::isArrayAllocation() const {
1226 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
1227 return !CI->isOne();
1228 return true;

1229}


1236 if (!isa<ConstantInt>(getArraySize())) return false;

1237

1238

1241}

1242

1243

1244

1245

1246

1247void LoadInst::AssertOK() {

1249 "Ptr must have pointer type.");

1250}

1251

1254 "Insertion position cannot be null when alignment not provided!");

1257 "BB must be in a Function when alignment not provided!");

1259 return DL.getABITypeAlign(Ty);

1260}

1261

1265

1270

1274 SyncScope::System, InsertBef) {}

1275

1283 AssertOK();

1285}

1286

1287

1288

1289

1290

1291void StoreInst::AssertOK() {

1294 "Ptr must have pointer type!");

1295}

1296

1298 : StoreInst(val, addr, false, InsertBefore) {}

1299

1302 : StoreInst(val, addr, isVolatile,

1304 InsertBefore) {}

1305

1309 SyncScope::System, InsertBefore) {}

1310

1314 : Instruction(Type::getVoidTy(val->getContext()), Store, AllocMarker,

1315 InsertBefore) {

1321 AssertOK();

1322}


1334 Op<2>() = NewVal;

1339

1341 "All operands must be non-null!");

1343 "Ptr must have pointer type!");

1345 "Cmp type and NewVal type must be same!");

1346}

1347

1349 Align Alignment,

1356 AtomicCmpXchg, AllocMarker, InsertBefore) {

1357 Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);

1358}

1359

1360

1361

1362

1363

1368 "atomicrmw instructions can only be atomic.");

1370 "atomicrmw instructions cannot be unordered.");

1377

1380 "Ptr must have pointer type!");

1382 "AtomicRMW instructions must be atomic!");

1383}

1384

1388 : Instruction(Val->getType(), AtomicRMW, AllocMarker, InsertBefore) {

1390}

1391

1393 switch (Op) {

1395 return "xchg";

1397 return "add";

1399 return "sub";

1401 return "and";

1403 return "nand";

1405 return "or";

1407 return "xor";

1409 return "max";

1411 return "min";

1413 return "umax";

1415 return "umin";

1417 return "fadd";

1419 return "fsub";

1421 return "fmax";

1423 return "fmin";

1425 return "uinc_wrap";

1427 return "udec_wrap";

1429 return "usub_cond";

1431 return "usub_sat";

1433 return "";

1434 }

1435

1437}

1438

1439

1440

1441

1442

1445 : Instruction(Type::getVoidTy(C), Fence, AllocMarker, InsertBefore) {

1448}

1449

1450

1451

1452

1453

1457 "NumOperands not initialized?");

1461}

1462

1463GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI,

1466 SourceElementType(GEPI.SourceElementType),

1467 ResultElementType(GEPI.ResultElementType) {

1469 "Wrong number of operands allocated");

1472}

1473

1475 if (auto *Struct = dyn_cast<StructType>(Ty)) {

1477 return nullptr;

1478 return Struct->getTypeAtIndex(Idx);

1479 }

1480 if (!Idx->getType()->isIntOrIntVectorTy())
1481 return nullptr;
1482 if (auto *Array = dyn_cast<ArrayType>(Ty))
1483 return Array->getElementType();
1484 if (auto *Vector = dyn_cast<VectorType>(Ty))
1485 return Vector->getElementType();
1486 return nullptr;

1487}

1488

1490 if (auto *Struct = dyn_cast<StructType>(Ty)) {
1491 if (Idx >= Struct->getNumElements())
1492 return nullptr;
1493 return Struct->getElementType(Idx);
1494 }
1495 if (auto *Array = dyn_cast<ArrayType>(Ty))
1496 return Array->getElementType();
1497 if (auto *Vector = dyn_cast<VectorType>(Ty))
1498 return Vector->getElementType();
1499 return nullptr;

1500}

1501

1502template <typename IndexTy>

1504 if (IdxList.empty())

1505 return Ty;

1506 for (IndexTy V : IdxList.slice(1)) {

1508 if (!Ty)

1509 return Ty;

1510 }

1511 return Ty;

1512}

1513

1516}

1517

1521}

1522

1525}

1526

1527

1528

1529

1531 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {

1533 if (!CI->isZero()) return false;

1534 } else {

1535 return false;

1536 }

1537 }

1538 return true;

1539}

1540

1541

1542

1543

1545 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {

1546 if (!isa(getOperand(i)))

1547 return false;

1548 }

1549 return true;

1550}

1551

1554}

1555

1557 GEPNoWrapFlags NW = cast<GEPOperator>(this)->getNoWrapFlags();

1558 if (B)

1560 else

1563}

1564

1566 return cast<GEPOperator>(this)->getNoWrapFlags();

1567}

1568

1570 return cast<GEPOperator>(this)->isInBounds();

1571}

1572

1574 return cast<GEPOperator>(this)->hasNoUnsignedSignedWrap();

1575}

1576

1578 return cast<GEPOperator>(this)->hasNoUnsignedWrap();

1579}

1580

1583

1584 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);

1585}

1586

1590 APInt &ConstantOffset) const {

1591

1592 return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,

1593 ConstantOffset);

1594}

1595

1596

1597

1598

1599

1600ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,

1604 ExtractElement, AllocMarker, InsertBef) {

1605 assert(isValidOperands(Val, Index) &&

1606 "Invalid extractelement instruction operands!");

1608 Op<1>() = Index;

1610}

1611

1613 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())

1614 return false;

1615 return true;

1616}

1617

1618

1619

1620

1621

1622InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,

1625 : Instruction(Vec->getType(), InsertElement, AllocMarker, InsertBef) {

1627 "Invalid insertelement instruction operands!");

1630 Op<2>() = Index;

1632}

1633

1635 const Value *Index) {

1637 return false;

1638

1639 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())

1640 return false;

1641

1642 if (!Index->getType()->isIntegerTy())

1643 return false;

1644 return true;

1645}
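// Usage sketch (illustrative): the checks above require the scalar to match the
// vector's element type and the index to be an integer.
//   Value *Ins = InsertElementInst::Create(
//       Vec, Scalar, ConstantInt::get(Type::getInt64Ty(Ctx), 0), "ins", InsertPt);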


1652 assert(V && "Cannot create placeholder of nullptr V");

1654}

1655

1659 InsertBefore) {}

1660

1665 InsertBefore) {}

1666

1673 ShuffleVector, AllocMarker, InsertBefore) {

1675 "Invalid shuffle vector instruction operands!");

1676

1683}

1684

1691 ShuffleVector, AllocMarker, InsertBefore) {

1693 "Invalid shuffle vector instruction operands!");

1698}

1699

1701 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();

1702 int NumMaskElts = ShuffleMask.size();

1704 for (int i = 0; i != NumMaskElts; ++i) {

1708 continue;

1709 }

1710 assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");

1711 MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;

1712 NewMask[i] = MaskElt;

1713 }

1716}

1717

1720

1721 if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())

1722 return false;

1723

1724

1725 int V1Size =

1726 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();

1727 for (int Elem : Mask)

1729 return false;

1730

1731 if (isa<ScalableVectorType>(V1->getType()))

1733 return false;

1734

1735 return true;

1736}

1737

1739 const Value *Mask) {

1740

1742 return false;

1743

1744

1745

1746 auto *MaskTy = dyn_cast<VectorType>(Mask->getType());

1747 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||

1748 isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))

1749 return false;

1750

1751

1752 if (isa(Mask) || isa(Mask))

1753 return true;

1754

1755

1756

1757 if (isa(MaskTy))

1758 return false;

1759

1760 unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();

1761

1762 if (const auto *CI = dyn_cast<ConstantInt>(Mask))

1763 return !CI->uge(V1Size * 2);

1764

1765 if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {

1766 for (Value *Op : MV->operands()) {

1767 if (auto *CI = dyn_cast(Op)) {

1768 if (CI->uge(V1Size*2))

1769 return false;

1770 } else if (!isa(Op)) {

1771 return false;

1772 }

1773 }

1774 return true;

1775 }

1776

1777 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {

1778 for (unsigned i = 0, e = cast(MaskTy)->getNumElements();

1779 i != e; ++i)

1780 if (CDS->getElementAsInteger(i) >= V1Size*2)

1781 return false;

1782 return true;

1783 }

1784

1785 return false;

1786}

1787

1790 ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();

1791

1792 if (isa(Mask)) {

1793 Result.resize(EC.getKnownMinValue(), 0);

1794 return;

1795 }

1796

1797 Result.reserve(EC.getKnownMinValue());

1798

1799 if (EC.isScalable()) {

1800 assert((isa(Mask) || isa(Mask)) &&

1801 "Scalable vector shuffle mask must be undef or zeroinitializer");

1802 int MaskVal = isa(Mask) ? -1 : 0;

1803 for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)

1804 Result.emplace_back(MaskVal);

1805 return;

1806 }

1807

1808 unsigned NumElts = EC.getKnownMinValue();

1809

1810 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {

1811 for (unsigned i = 0; i != NumElts; ++i)

1812 Result.push_back(CDS->getElementAsInteger(i));

1813 return;

1814 }

1815 for (unsigned i = 0; i != NumElts; ++i) {

1816 Constant *C = Mask->getAggregateElement(i);

1817 Result.push_back(isa<UndefValue>(C) ? -1 :
1818 cast<ConstantInt>(C)->getZExtValue());

1819 }

1820}

1821

1823 ShuffleMask.assign(Mask.begin(), Mask.end());

1825}

1826

1828 Type *ResultTy) {

1830 if (isa(ResultTy)) {

1833 if (Mask[0] == 0)

1836 }

1838 for (int Elem : Mask) {

1841 else

1842 MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));

1843 }

1845}

1846

1848 assert(!Mask.empty() && "Shuffle mask must contain elements");

1849 bool UsesLHS = false;

1850 bool UsesRHS = false;

1851 for (int I : Mask) {

1852 if (I == -1)

1853 continue;

1854 assert(I >= 0 && I < (NumOpElts * 2) &&

1855 "Out-of-bounds shuffle mask element");

1856 UsesLHS |= (I < NumOpElts);

1857 UsesRHS |= (I >= NumOpElts);

1858 if (UsesLHS && UsesRHS)

1859 return false;

1860 }

1861

1862 return UsesLHS || UsesRHS;

1863}
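// Example (illustrative): with NumOpElts == 4, the mask <0, 3, 1, 1> reads only
// the first operand and <4, 7, 5, 5> only the second, so both are single-source;
// <0, 5, 2, 7> mixes the two operands and is rejected by the check above.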

1864

1866

1867

1869}

1870

1873 return false;

1874 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {

1875 if (Mask[i] == -1)

1876 continue;

1877 if (Mask[i] != i && Mask[i] != (NumOpElts + i))

1878 return false;

1879 }

1880 return true;

1881}

1882

1884 if (Mask.size() != static_cast<unsigned>(NumSrcElts))

1885 return false;

1886

1887

1889}

1890

1892 if (Mask.size() != static_cast<unsigned>(NumSrcElts))

1893 return false;

1895 return false;

1896

1897

1898 if (NumSrcElts < 2)

1899 return false;

1900

1901 for (int I = 0, E = Mask.size(); I < E; ++I) {

1902 if (Mask[I] == -1)

1903 continue;

1904 if (Mask[I] != (NumSrcElts - 1 - I) &&

1905 Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))

1906 return false;

1907 }

1908 return true;

1909}

1910

1912 if (Mask.size() != static_cast<unsigned>(NumSrcElts))

1913 return false;

1915 return false;

1916 for (int I = 0, E = Mask.size(); I < E; ++I) {

1917 if (Mask[I] == -1)

1918 continue;

1919 if (Mask[I] != 0 && Mask[I] != NumSrcElts)

1920 return false;

1921 }

1922 return true;

1923}

1924

1926 if (Mask.size() != static_cast<unsigned>(NumSrcElts))

1927 return false;

1928

1930 return false;

1931 for (int I = 0, E = Mask.size(); I < E; ++I) {

1932 if (Mask[I] == -1)

1933 continue;

1934 if (Mask[I] != I && Mask[I] != (NumSrcElts + I))

1935 return false;

1936 }

1937 return true;

1938}

1939

1941

1942

1943

1944

1945

1946

1947 if (Mask.size() != static_cast<unsigned>(NumSrcElts))

1948 return false;

1949

1950 int Sz = Mask.size();

1952 return false;

1953

1954

1955 if (Mask[0] != 0 && Mask[0] != 1)

1956 return false;

1957

1958

1959

1960 if ((Mask[1] - Mask[0]) != NumSrcElts)

1961 return false;

1962

1963

1964

1965 for (int I = 2; I < Sz; ++I) {

1966 int MaskEltVal = Mask[I];

1967 if (MaskEltVal == -1)

1968 return false;

1969 int MaskEltPrevVal = Mask[I - 2];

1970 if (MaskEltVal - MaskEltPrevVal != 2)

1971 return false;

1972 }

1973 return true;

1974}

1975

1977 int &Index) {

1978 if (Mask.size() != static_cast<unsigned>(NumSrcElts))

1979 return false;

1980

1981 int StartIndex = -1;

1982 for (int I = 0, E = Mask.size(); I != E; ++I) {

1983 int MaskEltVal = Mask[I];

1984 if (MaskEltVal == -1)

1985 continue;

1986

1987 if (StartIndex == -1) {

1988

1989

1990 if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))

1991 return false;

1992

1993 StartIndex = MaskEltVal - I;

1994 continue;

1995 }

1996

1997

1998 if (MaskEltVal != (StartIndex + I))

1999 return false;

2000 }

2001

2002 if (StartIndex == -1)

2003 return false;

2004

2005

2006 Index = StartIndex;

2007 return true;

2008}

2009

2011 int NumSrcElts, int &Index) {

2012

2014 return false;

2015

2016

2017 if (NumSrcElts <= (int)Mask.size())

2018 return false;

2019

2020

2021 int SubIndex = -1;

2022 for (int i = 0, e = Mask.size(); i != e; ++i) {

2023 int M = Mask[i];

2024 if (M < 0)

2025 continue;

2026 int Offset = (M % NumSrcElts) - i;

2027 if (0 <= SubIndex && SubIndex != Offset)

2028 return false;

2030 }

2031

2032 if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {

2033 Index = SubIndex;

2034 return true;

2035 }

2036 return false;

2037}

2038

2040 int NumSrcElts, int &NumSubElts,

2041 int &Index) {

2042 int NumMaskElts = Mask.size();

2043

2044

2045 if (NumMaskElts < NumSrcElts)

2046 return false;

2047

2048

2050 return false;

2051

2052

2056 bool Src0Identity = true;

2057 bool Src1Identity = true;

2058

2059 for (int i = 0; i != NumMaskElts; ++i) {

2060 int M = Mask[i];

2061 if (M < 0) {

2062 UndefElts.setBit(i);

2063 continue;

2064 }

2065 if (M < NumSrcElts) {

2067 Src0Identity &= (M == i);

2068 continue;

2069 }

2071 Src1Identity &= (M == (i + NumSrcElts));

2072 }

2073 assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&

2074 "unknown shuffle elements");

2076 "2-source shuffle not found");

2077

2078

2079

2082 int Src0Hi = NumMaskElts - Src0Elts.countl_zero();

2083 int Src1Hi = NumMaskElts - Src1Elts.countl_zero();

2084

2085

2086

2087 if (Src0Identity) {

2088 int NumSub1Elts = Src1Hi - Src1Lo;

2089 ArrayRef Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);

2091 NumSubElts = NumSub1Elts;

2092 Index = Src1Lo;

2093 return true;

2094 }

2095 }

2096

2097

2098

2099 if (Src1Identity) {

2100 int NumSub0Elts = Src0Hi - Src0Lo;

2101 ArrayRef Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);

2103 NumSubElts = NumSub0Elts;

2104 Index = Src0Lo;

2105 return true;

2106 }

2107 }

2108

2109 return false;

2110}

2111

2113

2114

2115 if (isa(getType()))

2116 return false;

2117

2118 int NumOpElts = cast(Op<0>()->getType())->getNumElements();

2119 int NumMaskElts = cast(getType())->getNumElements();

2120 if (NumMaskElts <= NumOpElts)

2121 return false;

2122

2123

2126 return false;

2127

2128

2129 for (int i = NumOpElts; i < NumMaskElts; ++i)

2130 if (Mask[i] != -1)

2131 return false;

2132

2133 return true;

2134}

2135

2137

2138

2139 if (isa(getType()))

2140 return false;

2141

2142 int NumOpElts = cast(Op<0>()->getType())->getNumElements();

2143 int NumMaskElts = cast(getType())->getNumElements();

2144 if (NumMaskElts >= NumOpElts)

2145 return false;

2146

2148}

2149

2151

2152 if (isa(Op<0>()) || isa(Op<1>()))

2153 return false;

2154

2155

2156

2157 if (isa(getType()))

2158 return false;

2159

2160 int NumOpElts = cast(Op<0>()->getType())->getNumElements();

2161 int NumMaskElts = cast(getType())->getNumElements();

2162 if (NumMaskElts != NumOpElts * 2)

2163 return false;

2164

2165

2166

2167

2168

2170}

2171

2173 int ReplicationFactor, int VF) {

2174 assert(Mask.size() == (unsigned)ReplicationFactor * VF &&

2175 "Unexpected mask size.");

2176

2177 for (int CurrElt : seq(VF)) {

2178 ArrayRef CurrSubMask = Mask.take_front(ReplicationFactor);

2179 assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&

2180 "Run out of mask?");

2181 Mask = Mask.drop_front(ReplicationFactor);

2182 if (all\_of(CurrSubMask, [CurrElt](int MaskElt) {

2183 return MaskElt == PoisonMaskElem || MaskElt == CurrElt;

2184 }))

2185 return false;

2186 }

2187 assert(Mask.empty() && "Did not consume the whole mask?");

2188

2189 return true;

2190}

2191

2193 int &ReplicationFactor, int &VF) {

2194

2196 ReplicationFactor =

2197 Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();

2198 if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)

2199 return false;

2200 VF = Mask.size() / ReplicationFactor;

2202 }

2203

2204

2205

2206

2207

2208

2209

2210

2211 int Largest = -1;

2212 for (int MaskElt : Mask) {

2214 continue;

2215

2216 if (MaskElt < Largest)

2217 return false;

2218 Largest = std::max(Largest, MaskElt);

2219 }

2220

2221

2222 for (int PossibleReplicationFactor :

2223 reverse(seq_inclusive(1, Mask.size()))) {

2224 if (Mask.size() % PossibleReplicationFactor != 0)

2225 continue;

2226 int PossibleVF = Mask.size() / PossibleReplicationFactor;

2228 PossibleVF))

2229 continue;

2230 ReplicationFactor = PossibleReplicationFactor;

2231 VF = PossibleVF;

2232 return true;

2233 }

2234

2235 return false;

2236}

2237

2239 int &VF) const {

2240

2241

2242 if (isa(getType()))

2243 return false;

2244

2245 VF = cast(Op<0>()->getType())->getNumElements();

2246 if (ShuffleMask.size() % VF != 0)

2247 return false;

2248 ReplicationFactor = ShuffleMask.size() / VF;

2249

2251}

2252

2254 if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||

2255 Mask.size() % VF != 0)

2256 return false;

2257 for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {

2260 continue;

2262 for (int Idx : SubMask) {

2264 Used.set(Idx);

2265 }

2266 if (!Used.all())

2267 return false;

2268 }

2269 return true;

2270}

2271

2272

2274

2275

2276 if (isa(getType()))

2277 return false;

2279 return false;

2280

2282}

2283

2286

2287

2288 if (!OpTy)

2289 return false;

2291

2292 return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);

2293}

2294

2296 ArrayRef Mask, unsigned Factor, unsigned NumInputElts,

2298 unsigned NumElts = Mask.size();

2299 if (NumElts % Factor)

2300 return false;

2301

2302 unsigned LaneLen = NumElts / Factor;

2304 return false;

2305

2306 StartIndexes.resize(Factor);

2307

2308

2309

2310

2311 unsigned I = 0, J;

2312 for (; I < Factor; I++) {

2313 unsigned SavedLaneValue;

2314 unsigned SavedNoUndefs = 0;

2315

2316

2317 for (J = 0; J < LaneLen - 1; J++) {

2318

2319 unsigned Lane = J * Factor + I;

2320 unsigned NextLane = Lane + Factor;

2321 int LaneValue = Mask[Lane];

2322 int NextLaneValue = Mask[NextLane];

2323

2324

2325 if (LaneValue >= 0 && NextLaneValue >= 0 &&

2326 LaneValue + 1 != NextLaneValue)

2327 break;

2328

2329

2330 if (LaneValue >= 0 && NextLaneValue < 0) {

2331 SavedLaneValue = LaneValue;

2332 SavedNoUndefs = 1;

2333 }

2334

2335

2336

2337

2338

2339

2340 if (SavedNoUndefs > 0 && LaneValue < 0) {

2341 SavedNoUndefs++;

2342 if (NextLaneValue >= 0 &&

2343 SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)

2344 break;

2345 }

2346 }

2347

2348 if (J < LaneLen - 1)

2349 return false;

2350

2351 int StartMask = 0;

2352 if (Mask[I] >= 0) {

2353

2354 StartMask = Mask[I];

2355 } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {

2356

2357 StartMask = Mask[(LaneLen - 1) * Factor + I] - J;

2358 } else if (SavedNoUndefs > 0) {

2359

2360 StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);

2361 }

2362

2363

2364 if (StartMask < 0)

2365 return false;

2366

2367 if (StartMask + LaneLen > NumInputElts)

2368 return false;

2369

2370 StartIndexes[I] = StartMask;

2371 }

2372

2373 return true;

2374}

2375

2376

2377

2378

2380 unsigned Factor,

2381 unsigned &Index) {

2382

2383 for (unsigned Idx = 0; Idx < Factor; Idx++) {

2384 unsigned I = 0;

2385

2386

2387

2388 for (; I < Mask.size(); I++)

2389 if (Mask[I] >= 0 && static_cast<unsigned>(Mask[I]) != Idx + I * Factor)

2390 break;

2391

2392 if (I == Mask.size()) {

2393 Index = Idx;

2394 return true;

2395 }

2396 }

2397

2398 return false;

2399}
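// Example (illustrative): for Factor == 2, the mask <0, 2, 4, 6> selects the
// even lanes (Index == 0) and <1, 3, 5, 7> the odd lanes (Index == 1); any
// other pattern fails the check above.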

2400

2401

2402

2403

2404

2406 int NumElts = Mask.size();

2407 assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");

2408

2409 int RotateAmt = -1;

2410 for (int i = 0; i != NumElts; i += NumSubElts) {

2411 for (int j = 0; j != NumSubElts; ++j) {

2412 int M = Mask[i + j];

2413 if (M < 0)

2414 continue;

2415 if (M < i || M >= i + NumSubElts)

2416 return -1;

2417 int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;

2418 if (0 <= RotateAmt && Offset != RotateAmt)

2419 return -1;

2421 }

2422 }

2423 return RotateAmt;

2424}

2425

2427 ArrayRef Mask, unsigned EltSizeInBits, unsigned MinSubElts,

2428 unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {

2429 for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {

2431 if (EltRotateAmt < 0)

2432 continue;

2433 RotateAmt = EltRotateAmt * EltSizeInBits;

2434 return true;

2435 }

2436

2437 return false;

2438}

2439

2440

2441

2442

2443

2447

2448

2449

2450

2451

2452 assert(!Idxs.empty() && "InsertValueInst must have at least one index");

2453

2455 Val->getType() && "Inserted value must match indexed type!");

2458

2461}

2462

2463InsertValueInst::InsertValueInst(const InsertValueInst &IVI)

2465 Indices(IVI.Indices) {

2469}

2470

2471

2472

2473

2474

2477

2478

2479

2480 assert(!Idxs.empty() && "ExtractValueInst must have at least one index");

2481

2484}

2485

2486ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)

2489 Indices(EVI.Indices) {

2491}

2492

2493

2494

2495

2496

2497

2498

2501 for (unsigned Index : Idxs) {

2502

2503

2504

2505

2506

2507

2508 if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {

2509 if (Index >= AT->getNumElements())

2510 return nullptr;

2511 Agg = AT->getElementType();

2512 } else if (StructType *ST = dyn_cast<StructType>(Agg)) {

2513 if (Index >= ST->getNumElements())

2514 return nullptr;

2515 Agg = ST->getElementType(Index);

2516 } else {

2517

2518 return nullptr;

2519 }

2520 }

2521 return const_cast<Type*>(Agg);

2522}

2523

2524

2525

2526

2527

2533 AssertOK();

2534}

2535

2539}

2540

2541void UnaryOperator::AssertOK() {

2543 (void)LHS;

2544#ifndef NDEBUG

2546 case FNeg:

2548 "Unary operation should return same type as operand!");

2550 "Tried to create a floating-point operation on a "

2551 "non-floating-point type!");

2552 break;

2554 }

2555#endif

2556}

2557

2558

2559

2560

2561

2564 : Instruction(Ty, iType, AllocMarker, InsertBefore) {

2568 AssertOK();

2569}

2570

2571void BinaryOperator::AssertOK() {

2573 (void)LHS; (void)RHS;

2575 "Binary operator operand types must match!");

2576#ifndef NDEBUG

2578 case Add: case Sub:

2579 case Mul:

2581 "Arithmetic operation should return same type as operands!");

2583 "Tried to create an integer operation on a non-integer type!");

2584 break;

2585 case FAdd: case FSub:

2588 "Arithmetic operation should return same type as operands!");

2590 "Tried to create a floating-point operation on a "

2591 "non-floating-point type!");

2592 break;

2593 case UDiv:

2594 case SDiv:

2596 "Arithmetic operation should return same type as operands!");

2598 "Incorrect operand type (not integer) for S/UDIV");

2599 break;

2600 case FDiv:

2602 "Arithmetic operation should return same type as operands!");

2604 "Incorrect operand type (not floating point) for FDIV");

2605 break;

2606 case URem:

2607 case SRem:

2609 "Arithmetic operation should return same type as operands!");

2611 "Incorrect operand type (not integer) for S/UREM");

2612 break;

2613 case FRem:

2615 "Arithmetic operation should return same type as operands!");

2617 "Incorrect operand type (not floating point) for FREM");

2618 break;

2619 case Shl:

2620 case LShr:

2621 case AShr:

2623 "Shift operation should return same type as operands!");

2625 "Tried to create a shift operation on a non-integral type!");

2626 break;

2627 case And: case Or:

2628 case Xor:

2630 "Logical operation should return same type as operands!");

2632 "Tried to create a logical operation on a non-integral type!");

2633 break;

2635 }

2636#endif

2637}

2638

2643 "Cannot create binary operator with two operands of differing type!");

2645}

2646

2649 Value *Zero = ConstantInt::get(Op->getType(), 0);

2651 InsertBefore);

2652}

2653

2656 Value *Zero = ConstantInt::get(Op->getType(), 0);

2657 return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);

2658}

2659

2664 Op->getType(), Name, InsertBefore);

2665}

2666

2667

2668

2669

2670

2673 return true;

2675 return false;

2676}

2677

2678

2679

2680

2681

2684 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);

2685 if (!MD)

2686 return 0.0;

2689}

2690

2691

2692

2693

2694

2695

2698 default: return false;

2699 case Instruction::ZExt:

2700 case Instruction::SExt:

2701 case Instruction::Trunc:

2702 return true;

2703 case Instruction::BitCast:

2706 }

2707}

2708

2709

2710

2711

2712

2713

2714

2715

2716

2718 Type *SrcTy,

2719 Type *DestTy,

2721 assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");

2722 switch (Opcode) {

2724 case Instruction::Trunc:

2725 case Instruction::ZExt:

2726 case Instruction::SExt:

2727 case Instruction::FPTrunc:

2728 case Instruction::FPExt:

2729 case Instruction::UIToFP:

2730 case Instruction::SIToFP:

2731 case Instruction::FPToUI:

2732 case Instruction::FPToSI:

2733 case Instruction::AddrSpaceCast:

2734

2735 return false;

2736 case Instruction::BitCast:

2737 return true;

2738 case Instruction::PtrToInt:

2739 return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==

2741 case Instruction::IntToPtr:

2742 return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==

2744 }

2745}

2746

2749}

2750

2751

2752

2753

2754

2755

2756

2757

2758

2761 Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,

2762 Type *DstIntPtrTy) {


2793 const unsigned numCastOps =

2794 Instruction::CastOpsEnd - Instruction::CastOpsBegin;

2795 static const uint8_t CastResults[numCastOps][numCastOps] = {

2796

2797

2798

2799

2800

2801 { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0},

2802 { 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0},

2803 { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0},

2804 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0},

2805 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0},

2806 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0},

2807 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0},

2808 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0},

2809 { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0},

2810 { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0},

2811 { 99,99,99,99,99,99,99,99,99,11,99,15, 0},

2812 { 5, 5, 5, 0, 0, 5, 5, 0, 0,16, 5, 1,14},

2813 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12},

2814 };

2815

2816

2817

2818

2819

2820 bool IsFirstBitcast = (firstOp == Instruction::BitCast);

2821 bool IsSecondBitcast = (secondOp == Instruction::BitCast);

2822 bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;

2823

2824

2825 if ((IsFirstBitcast && isa(SrcTy) != isa(MidTy)) ||

2826 (IsSecondBitcast && isa(MidTy) != isa(DstTy)))

2827 if (!AreBothBitcasts)

2828 return 0;

2829

2830 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]

2831 [secondOp-Instruction::CastOpsBegin];

2832 switch (ElimCase) {

2833 case 0:

2834

2835 return 0;

2836 case 1:

2837

2838 return firstOp;

2839 case 2:

2840

2841 return secondOp;

2842 case 3:

2843

2844

2845

2847 return firstOp;

2848 return 0;

2849 case 4:

2850

2851

2852 if (DstTy == MidTy)

2853 return firstOp;

2854 return 0;

2855 case 5:

2856

2857

2859 return secondOp;

2860 return 0;

2861 case 7: {

2862

2864 return 0;

2865

2866

2868 return 0;

2869

2871

2872

2873

2874

2875 if (MidSize == 64)

2876 return Instruction::BitCast;

2877

2878

2879 if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)

2880 return 0;

2882 if (MidSize >= PtrSize)

2883 return Instruction::BitCast;

2884 return 0;

2885 }

2886 case 8: {

2887

2888

2889

2892 if (SrcTy == DstTy)

2893 return Instruction::BitCast;

2894 if (SrcSize < DstSize)

2895 return firstOp;

2896 if (SrcSize > DstSize)

2897 return secondOp;

2898 return 0;

2899 }

2900 case 9:

2901

2902 return Instruction::ZExt;

2903 case 11: {

2904

2905 if (!MidIntPtrTy)

2906 return 0;

2910 if (SrcSize <= PtrSize && SrcSize == DstSize)

2911 return Instruction::BitCast;

2912 return 0;

2913 }

2914 case 12:

2915

2916

2918 return Instruction::AddrSpaceCast;

2919 return Instruction::BitCast;

2920 case 13:

2921

2922

2923

2930 "Illegal addrspacecast, bitcast sequence!");

2931

2932 return firstOp;

2933 case 14:

2934

2935 return Instruction::AddrSpaceCast;

2936 case 15:

2937

2938

2939

2945 "Illegal inttoptr, bitcast sequence!");

2946

2947 return firstOp;

2948 case 16:

2949

2950

2951

2957 "Illegal bitcast, ptrtoint sequence!");

2958

2959 return secondOp;

2960 case 17:

2961

2962 return Instruction::UIToFP;

2963 case 99:

2964

2965

2967 default:

2969 }

2970}

2971

2975

2976 switch (op) {

2977 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);

2978 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);

2979 case SExt: return new SExtInst (S, Ty, Name, InsertBefore);

2980 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);

2981 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);

2982 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);

2983 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);

2984 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);

2985 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);

2986 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);

2987 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);

2988 case BitCast:

2990 case AddrSpaceCast:

2992 default:

2994 }

2995}

2996

3000 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);

3001 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);

3002}

3003

3007 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);

3008 return Create(Instruction::SExt, S, Ty, Name, InsertBefore);

3009}

3010

3014 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);

3015 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);

3016}

3017

3018

3023 "Invalid cast");

3026 cast(Ty)->getElementCount() ==

3027 cast(S->getType())->getElementCount()) &&

3028 "Invalid cast");

3029

3031 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);

3032

3034}

3035

3040

3042 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);

3043

3044 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);

3045}

3046

3051 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);

3053 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);

3054

3055 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);

3056}

3057

3062 "Invalid integer cast");

3063 unsigned SrcBits = C->getType()->getScalarSizeInBits();

3066 (SrcBits == DstBits ? Instruction::BitCast :

3067 (SrcBits > DstBits ? Instruction::Trunc :

3068 (isSigned ? Instruction::SExt : Instruction::ZExt)));

3069 return Create(opcode, C, Ty, Name, InsertBefore);

3070}

3071

3075 "Invalid cast");

3076 unsigned SrcBits = C->getType()->getScalarSizeInBits();

3078 assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");

3080 (SrcBits == DstBits ? Instruction::BitCast :

3081 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));

3082 return Create(opcode, C, Ty, Name, InsertBefore);

3083}

3084

3087 return false;

3088

3089 if (SrcTy == DestTy)

3090 return true;

3091

3092 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {

3093 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {

3094 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {

3095

3096 SrcTy = SrcVecTy->getElementType();

3097 DestTy = DestVecTy->getElementType();

3098 }

3099 }

3100 }

3101

3102 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {

3103 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {

3104 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();

3105 }

3106 }

3107

3110

3111

3112

3114 return false;

3115

3116 if (SrcBits != DestBits)

3117 return false;

3118

3119 return true;

3120}

3121

3124

3125 if (auto *PtrTy = dyn_cast(SrcTy))

3126 if (auto *IntTy = dyn_cast(DestTy))

3127 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&

3128 DL.isNonIntegralPointerType(PtrTy));

3129 if (auto *PtrTy = dyn_cast(DestTy))

3130 if (auto *IntTy = dyn_cast(SrcTy))

3131 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&

3132 DL.isNonIntegralPointerType(PtrTy));

3133

3135}

3136

3137

3138

3139

3140

3141

3142

3145 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {

3146 Type *SrcTy = Src->getType();

3147

3149 "Only first class types are castable!");

3150

3151 if (SrcTy == DestTy)

3152 return BitCast;

3153

3154

3155 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))

3156 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))

3157 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {

3158

3159

3160 SrcTy = SrcVecTy->getElementType();

3161 DestTy = DestVecTy->getElementType();

3162 }

3163

3164

3167

3168

3169 if (DestTy->isIntegerTy()) {

3170 if (SrcTy->isIntegerTy()) {

3171 if (DestBits < SrcBits)

3172 return Trunc;

3173 else if (DestBits > SrcBits) {

3174 if (SrcIsSigned)

3175 return SExt;

3176 else

3177 return ZExt;

3178 } else {

3179 return BitCast;

3180 }

3181 } else if (SrcTy->isFloatingPointTy()) {

3182 if (DestIsSigned)

3183 return FPToSI;

3184 else

3185 return FPToUI;

3187 assert(DestBits == SrcBits &&

3188 "Casting vector to integer of different width");

3189 return BitCast;

3190 } else {

3192 "Casting from a value that is not first-class type");

3193 return PtrToInt;

3194 }

3195 } else if (DestTy->isFloatingPointTy()) {

3196 if (SrcTy->isIntegerTy()) {

3197 if (SrcIsSigned)

3198 return SIToFP;

3199 else

3200 return UIToFP;

3201 } else if (SrcTy->isFloatingPointTy()) {

3202 if (DestBits < SrcBits) {

3203 return FPTrunc;

3204 } else if (DestBits > SrcBits) {

3205 return FPExt;

3206 } else {

3207 return BitCast;

3208 }

3210 assert(DestBits == SrcBits &&

3211 "Casting vector to floating point of different width");

3212 return BitCast;

3213 }

3214 llvm_unreachable("Casting pointer or non-first class to float");

3216 assert(DestBits == SrcBits &&

3217 "Illegal cast to vector (wrong type or size)");

3218 return BitCast;

3222 return AddrSpaceCast;

3223 return BitCast;

3225 return IntToPtr;

3226 }

3227 llvm_unreachable("Casting pointer to other than pointer or int");

3228 }

3229 llvm_unreachable("Casting to type that is not first-class");

3230}
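// Usage sketch (illustrative): the opcode computed above is normally fed
// straight back into CastInst::Create.
//   Instruction::CastOps OpC = CastInst::getCastOpcode(
//       V, /*SrcIsSigned=*/true, DestTy, /*DestIsSigned=*/true);
//   Value *Conv = CastInst::Create(OpC, V, DestTy, "conv", InsertPt);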


3240bool

3244 return false;

3245

3246

3247

3248 bool SrcIsVec = isa(SrcTy);

3249 bool DstIsVec = isa(DstTy);

3252

3253

3254

3255

3256 ElementCount SrcEC = SrcIsVec ? cast(SrcTy)->getElementCount()

3258 ElementCount DstEC = DstIsVec ? cast(DstTy)->getElementCount()

3260

3261

3262 switch (op) {

3263 default: return false;

3264 case Instruction::Trunc:

3266 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;

3267 case Instruction::ZExt:

3269 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;

3270 case Instruction::SExt:

3272 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;

3273 case Instruction::FPTrunc:

3275 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;

3276 case Instruction::FPExt:

3278 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;

3279 case Instruction::UIToFP:

3280 case Instruction::SIToFP:

3282 SrcEC == DstEC;

3283 case Instruction::FPToUI:

3284 case Instruction::FPToSI:

3286 SrcEC == DstEC;

3287 case Instruction::PtrToInt:

3288 if (SrcEC != DstEC)

3289 return false;

3291 case Instruction::IntToPtr:

3292 if (SrcEC != DstEC)

3293 return false;

3295 case Instruction::BitCast: {

3298

3299

3300

3301 if (!SrcPtrTy != !DstPtrTy)

3302 return false;

3303

3304

3305

3306 if (!SrcPtrTy)

3308

3309

3311 return false;

3312

3313

3314 if (SrcIsVec && DstIsVec)

3315 return SrcEC == DstEC;

3316 if (SrcIsVec)

3318 if (DstIsVec)

3320

3321 return true;

3322 }

3323 case Instruction::AddrSpaceCast: {

3325 if (!SrcPtrTy)

3326 return false;

3327

3329 if (!DstPtrTy)

3330 return false;

3331

3333 return false;

3334

3335 return SrcEC == DstEC;

3336 }

3337 }

3338}

3339

3342 : CastInst(Ty, Trunc, S, Name, InsertBefore) {

3344}

3345

3348 : CastInst(Ty, ZExt, S, Name, InsertBefore) {

3350}

3351

3354 : CastInst(Ty, SExt, S, Name, InsertBefore) {

3356}

3357

3360 : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {

3362}

3363

3366 : CastInst(Ty, FPExt, S, Name, InsertBefore) {

3368}

3369

3372 : CastInst(Ty, UIToFP, S, Name, InsertBefore) {

3374}

3375

3378 : CastInst(Ty, SIToFP, S, Name, InsertBefore) {

3380}

3381

3384 : CastInst(Ty, FPToUI, S, Name, InsertBefore) {

3386}

3387

3390 : CastInst(Ty, FPToSI, S, Name, InsertBefore) {

3392}

3393

3396 : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {

3398}

3399

3402 : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {

3404}

3405

3408 : CastInst(Ty, BitCast, S, Name, InsertBefore) {

3410}

3411

3414 : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {

3416}

3417

3418

3419

3420

3421

3425 : Instruction(ty, op, AllocMarker, InsertBefore) {

3430 if (FlagsSource)

3432}

3433

3436 if (Op == Instruction::ICmp) {

3437 if (InsertBefore.isValid())

3440 else

3443 }

3444

3445 if (InsertBefore.isValid())

3448 else

3451}

3452

3460 return Inst;

3461}

3462

3464 if (ICmpInst *IC = dyn_cast<ICmpInst>(this))

3465 IC->swapOperands();

3466 else

3467 cast<FCmpInst>(this)->swapOperands();

3468}

3469

3471 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))

3472 return IC->isCommutative();

3473 return cast<FCmpInst>(this)->isCommutative();

3474}

3475

3482}

3483

3484

3485

3487 auto *LHS = dyn_cast(Cmp->getOperand(0));

3488 auto *RHS = dyn_cast(Cmp->getOperand(1));

3489 if (auto *Const = LHS ? LHS : RHS) {

3492 }

3493 return false;

3494}

3495

3496

3497

3498

3502 return true;

3505 return false;

3506 [[fallthrough]];

3509 default:

3510 return false;

3511 }

3512}

3513

3515 switch (pred) {

3527

3544 }

3545}

3546

3548 switch (Pred) {

3549 default: return "unknown";

3576 }

3577}

3578

3581 return OS;

3582}

3583

3585 switch (pred) {

3589 return pred;

3594 }

3595}

3596

3598 switch (pred) {

3602 return pred;

3607 }

3608}

3609

3611 switch (pred) {

3614 return pred;

3623

3628 return pred;

3637 }

3638}

3639

3641 switch (pred) {

3650 return true;

3651 default:

3652 return false;

3653 }

3654}

3655

3657 switch (pred) {

3666 return true;

3667 default:

3668 return false;

3669 }

3670}

3671

3673 switch (pred) {

3690 default:

3691 return pred;

3692 }

3693}

3694

3696 switch (pred) {

3713 default:

3714 return pred;

3715 }

3716}

3717

3720

3725

3727}

3728

3730 switch (predicate) {

3731 default: return false;

3734 }

3735}

3736

3738 switch (predicate) {

3739 default: return false;

3742 }

3743}

3744

3748 switch (Pred) {

3754 return LHS.ugt(RHS);

3756 return LHS.uge(RHS);

3758 return LHS.ult(RHS);

3760 return LHS.ule(RHS);

3762 return LHS.sgt(RHS);

3764 return LHS.sge(RHS);

3766 return LHS.slt(RHS);

3768 return LHS.sle(RHS);

3769 default:

3771 };

3772}

3773

3777 switch (Pred) {

3778 default:

3781 return false;

3783 return true;

3812 }

3813}

3814

3818 switch (Pred) {

3839 default:

3841 }

3842}

3843

3846 return pred;

3851

3853}

3854

3856 switch (predicate) {

3857 default: return false;

3861 }

3862}

3863

3865 switch (predicate) {

3866 default: return false;

3870 }

3871}

3872

3874 switch(predicate) {

3875 default: return false;

3878 }

3879}

3880

3882 switch(predicate) {

3885 default: return false;

3886 }

3887}

3888

3890

3891

3893 return true;

3894

3899

3900 switch (Pred1) {

3901 default:

3902 break;

3904

3907 case CmpInst::ICMP_UGT:

3909 case CmpInst::ICMP_ULT:

3911 case CmpInst::ICMP_SGT:

3913 case CmpInst::ICMP_SLT:

3915 }

3916 return false;

3917}

3918

3923}

3924

3928 return true;

3930 return false;

3931 return std::nullopt;

3932}

3933

3934

3935

3936

3937

3940 if (A.Pred == B.Pred)

3941 return A.HasSameSign == B.HasSameSign ? A : CmpPredicate(A.Pred);

3943 return {};

3944 if (A.HasSameSign &&

3946 return B.Pred;

3947 if (B.HasSameSign &&

3949 return A.Pred;

3950 return {};

3951}

3952

3955}

3956

3958 if (auto *ICI = dyn_cast<ICmpInst>(Cmp))

3959 return ICI->getCmpPredicate();

3960 return Cmp->getPredicate();

3961}

3962

3965}

3966

3969}

3970

3971

3972

3973

3974

3977 ReservedSpace = NumReserved;

3980

3983}

3984

3985

3986

3987

3988

3992 AllocMarker, InsertBefore) {

3994}

3995

3996SwitchInst::SwitchInst(const SwitchInst &SI)

3998 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());

3999 setNumHungOffUseOperands(SI.getNumOperands());

4000 Use *OL = getOperandList();

4001 const Use *InOL = SI.getOperandList();

4002 for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {

4003 OL[i] = InOL[i];

4004 OL[i+1] = InOL[i+1];

4005 }

4006 SubclassOptionalData = SI.SubclassOptionalData;

4007}

4008

4009

4010

4014 if (OpNo+2 > ReservedSpace)

4015 growOperands();

4016

4017 assert(OpNo+1 < ReservedSpace && "Growing didn't work!");

4022}

4023

4024

4025

4027 unsigned idx = I->getCaseIndex();

4028

4030

4033

4034

4035 if (2 + (idx + 1) * 2 != NumOps) {

4036 OL[2 + idx * 2] = OL[NumOps - 2];

4037 OL[2 + idx * 2 + 1] = OL[NumOps - 1];

4038 }

4039

4040

4041 OL[NumOps-2].set(nullptr);

4042 OL[NumOps-2+1].set(nullptr);

4044

4045 return CaseIt(this, idx);

4046}

4047

4048

4049

4050

4051void SwitchInst::growOperands() {

4053 unsigned NumOps = e*3;

4054

4055 ReservedSpace = NumOps;

4057}

4058

4060 assert(Changed && "called only if metadata has changed");

4061

4062 if (!Weights)

4063 return nullptr;

4064

4065 assert(SI.getNumSuccessors() == Weights->size() &&

4066 "num of prof branch_weights must accord with num of successors");

4067

4068 bool AllZeroes = all_of(*Weights, [](uint32_t W) { return W == 0; });

4069

4070 if (AllZeroes || Weights->size() < 2)

4071 return nullptr;

4072

4074}

4075

4078 if (!ProfileData)

4079 return;

4080

4082 llvm_unreachable("number of prof branch_weights metadata operands does "

4083 "not correspond to number of successors");

4084 }

4085

4088 return;

4089 this->Weights = std::move(Weights);

4090}

4091

4094 if (Weights) {

4095 assert(SI.getNumSuccessors() == Weights->size() &&

4096 "num of prof branch_weights must accord with num of successors");

4097 Changed = true;

4098

4099

4100

4101 (*Weights)[I->getCaseIndex() + 1] = Weights->back();

4102 Weights->pop_back();

4103 }

4104 return SI.removeCase(I);

4105}

4106

4110 SI.addCase(OnVal, Dest);

4111

4112 if (!Weights && W && *W) {

4113 Changed = true;

4115 (*Weights)[SI.getNumSuccessors() - 1] = *W;

4116 } else if (Weights) {

4117 Changed = true;

4118 Weights->push_back(W.value_or(0));

4119 }

4120 if (Weights)

4121 assert(SI.getNumSuccessors() == Weights->size() &&

4122 "num of prof branch_weights must accord with num of successors");

4123}

4124

4127

4128 Changed = false;

4129 if (Weights)

4130 Weights->resize(0);

4131 return SI.eraseFromParent();

4132}

4133

4136 if (!Weights)

4137 return std::nullopt;

4138 return (*Weights)[idx];

4139}

4140

4143 if (!W)

4144 return;

4145

4146 if (!Weights && *W)

4148

4149 if (Weights) {

4150 auto &OldW = (*Weights)[idx];

4151 if (*W != OldW) {

4152 Changed = true;

4153 OldW = *W;

4154 }

4155 }

4156}

4157

4160 unsigned idx) {

4162 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)

4163 return mdconst::extract(ProfileData->getOperand(idx + 1))

4164 ->getValue()

4165 .getZExtValue();

4166

4167 return std::nullopt;

4168}

4169

4170

4171

4172

4173

4174void IndirectBrInst::init(Value *Address, unsigned NumDests) {

4176 "Address of indirectbr must be a pointer");

4177 ReservedSpace = 1+NumDests;

4180

4182}

4183

4184

4185

4186

4187

void IndirectBrInst::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e*2;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}

4195

4196IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,

4199 Instruction::IndirectBr, AllocMarker, InsertBefore) {

4201}

4202

4203IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)

4205 AllocMarker) {

4208 Use *OL = getOperandList();

4210 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)

4211 OL[i] = InOL[i];

4213}

4214

4215

4216

4219 if (OpNo+1 > ReservedSpace)

4220 growOperands();

4221

4222 assert(OpNo < ReservedSpace && "Growing didn't work!");

4225}

4226

4227

4228

4231

4234

4235

4236 OL[idx+1] = OL[NumOps-1];

4237

4238

4239 OL[NumOps-1].set(nullptr);

4241}

4242

4243

4244

4245

4246

4250}

4251

4252

4253

4254

4255

4256

4257

4258

4262}

4263

4266}

4267

4270}

4271

4274}

4275

4278}

4279

4282}

4283

4286}

4287

4293 return Result;

4294}

4295

4299}

4300

4304}

4305

4311 Result->setWeak(isWeak());

4312 return Result;

4313}

4314

4320 return Result;

4321}

4322

4325}

4326

4329}

4330

4333}

4334

4337}

4338

4341}

4342

4345}

4346

4349}

4350

4353}

4354

4357}

4358

4361}

4362

4365}

4366

4369}

4370

4373}

4374

4377}

4378

4384 return new (AllocMarker) CallInst(*this, AllocMarker);

4385 }

4387 return new (AllocMarker) CallInst(*this, AllocMarker);

4388}

4389

4392}

4393

4396}

4397

4400}

4401

4404}

4405

4408}

4409

4411

4414}

4415

4418 return new (AllocMarker) ReturnInst(*this, AllocMarker);

4419}

4420

4423 return new (AllocMarker) BranchInst(*this, AllocMarker);

4424}

4425

4427

4430}

4431

4437 return new (AllocMarker) InvokeInst(*this, AllocMarker);

4438 }

4440 return new (AllocMarker) InvokeInst(*this, AllocMarker);

4441}

4442

4448 return new (AllocMarker) CallBrInst(*this, AllocMarker);

4449 }

4451 return new (AllocMarker) CallBrInst(*this, AllocMarker);

4452}

4453

4455 return new (AllocMarker) ResumeInst(*this);

4456}

4457

4461}

4462

4465}

4466

4469}

4470

4473 return new (AllocMarker) FuncletPadInst(*this, AllocMarker);

4474}

4475

4479}

4480

4483}


Atomic ordering constants.

This file contains the simple types necessary to represent the attributes associated with functions a...

static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")

static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")

This file contains the declarations for the subclasses of Constant, which represent the different fla...

Returns the sub type a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.

static bool isSigned(unsigned int Opcode)

Module.h This file contains the declarations for the Module class.

static Align computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos)

static bool isImpliedFalseByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)

static Value * createPlaceholderForShuffleVector(Value *V)

static Align computeAllocaDefaultAlign(Type *Ty, InsertPosition Pos)

static cl::opt< bool > DisableI2pP2iOpt("disable-i2p-p2i-opt", cl::init(false), cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"))

static bool hasNonZeroFPOperands(const CmpInst *Cmp)

static int matchShuffleAsBitRotate(ArrayRef< int > Mask, int NumSubElts)

Try to lower a vector shuffle as a bit rotation.

static Type * getIndexedTypeInternal(Type *Ty, ArrayRef< IndexTy > IdxList)

static bool isReplicationMaskWithParams(ArrayRef< int > Mask, int ReplicationFactor, int VF)

static bool isIdentityMaskImpl(ArrayRef< int > Mask, int NumOpElts)

static bool isSingleSourceMaskImpl(ArrayRef< int > Mask, int NumOpElts)

static Value * getAISize(LLVMContext &Context, Value *Amt)

static bool isImpliedTrueByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)



This file contains the declarations for profiling metadata utility functions.



static unsigned getNumElements(Type *Ty)

This file implements the SmallBitVector class.

This file defines the SmallVector class.

static SymbolRef::Type getType(const Symbol *Sym)

static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)

Returns the opcode of Values or ~0 if they do not all agree.

float convertToFloat() const

Converts this APFloat to host float value.

Class for arbitrary precision integers.

void setBit(unsigned BitPosition)

Set the given bit to 1 whose position is given as "bitPosition".

bool isZero() const

Determine if this value is zero, i.e. all bits are clear.

unsigned countr_zero() const

Count the number of trailing zero bits.

unsigned countl_zero() const

The APInt version of std::countl_zero.

static APInt getZero(unsigned numBits)

Get the '0' value for the specified bit-width.

This class represents a conversion between pointers from one address space to another.

AddrSpaceCastInst * cloneImpl() const

Clone an identical AddrSpaceCastInst.

AddrSpaceCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

Constructor with insert-before-instruction semantics.

an instruction to allocate memory on the stack

std::optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const

Get allocation size in bits.

bool isSwiftError() const

Return true if this alloca is used as a swifterror argument to a call.

bool isStaticAlloca() const

Return true if this alloca is in the entry block of the function and is a constant size.

Align getAlign() const

Return the alignment of the memory that is being allocated by the instruction.

AllocaInst * cloneImpl() const

Type * getAllocatedType() const

Return the type that is being allocated by the instruction.

bool isUsedWithInAlloca() const

Return true if this alloca is used as an inalloca argument to a call.

unsigned getAddressSpace() const

Return the address space for the allocation.

std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const

Get allocation size in bytes.

bool isArrayAllocation() const

Return true if there is an allocation size parameter to the allocation instruction that is not 1.

void setAlignment(Align Align)

const Value * getArraySize() const

Get the number of elements allocated.

AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, const Twine &Name, InsertPosition InsertBefore)
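The alloca entries above cover both construction and size queries. Below is a minimal sketch of how they combine, assuming a recent LLVM where getAllocationSize returns std::optional<TypeSize> and an IRBuilder positioned at a valid insertion point; the helper name emitArrayAlloca and its parameters are illustrative, not part of this file.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include <cassert>
using namespace llvm;

// Emit "alloca [N x i32]" at the builder's insertion point and, when the
// size is compile-time known, check it against the expected byte count.
static AllocaInst *emitArrayAlloca(IRBuilder<> &B, const DataLayout &DL,
                                   uint64_t N) {
  AllocaInst *AI = B.CreateAlloca(ArrayType::get(B.getInt32Ty(), N),
                                  /*ArraySize=*/nullptr, "buf");
  if (std::optional<TypeSize> Size = AI->getAllocationSize(DL))
    assert(!Size->isScalable() && Size->getFixedValue() == 4 * N);
  return AI;
}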

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...

size_t size() const

size - Get the array size.

bool empty() const

empty - Check if the array is empty.

ArrayRef< T > slice(size_t N, size_t M) const

slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.

Class to represent array types.

An instruction that atomically checks whether a specified value is in a memory location,...

void setSyncScopeID(SyncScope::ID SSID)

Sets the synchronization scope ID of this cmpxchg instruction.

bool isVolatile() const

Return true if this is a cmpxchg from a volatile memory location.

void setFailureOrdering(AtomicOrdering Ordering)

Sets the failure ordering constraint of this cmpxchg instruction.

AtomicOrdering getFailureOrdering() const

Returns the failure ordering constraint of this cmpxchg instruction.

void setSuccessOrdering(AtomicOrdering Ordering)

Sets the success ordering constraint of this cmpxchg instruction.

AtomicCmpXchgInst * cloneImpl() const

Align getAlign() const

Return the alignment of the memory that is being allocated by the instruction.

bool isWeak() const

Return true if this cmpxchg may spuriously fail.

void setAlignment(Align Align)

AtomicOrdering getSuccessOrdering() const

Returns the success ordering constraint of this cmpxchg instruction.

SyncScope::ID getSyncScopeID() const

Returns the synchronization scope ID of this cmpxchg instruction.

AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
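A short sketch of building a cmpxchg through IRBuilder and toggling the flags documented above. emitCompareExchange is an illustrative name, and the MaybeAlign parameter of CreateAtomicCmpXchg is assumed (present in recent LLVM releases, absent in older ones).

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include <cassert>
using namespace llvm;

// Emit a weak, sequentially consistent cmpxchg; a weak cmpxchg may fail
// spuriously, so callers are expected to retry in a loop.
static AtomicCmpXchgInst *emitCompareExchange(IRBuilder<> &B, Value *Ptr,
                                              Value *Expected, Value *Desired) {
  AtomicCmpXchgInst *CX = B.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, MaybeAlign(),
      AtomicOrdering::SequentiallyConsistent, // success ordering
      AtomicOrdering::Monotonic);             // failure ordering
  CX->setWeak(true);
  assert(CX->isWeak() && !CX->isVolatile());
  return CX;
}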

an instruction that atomically reads a memory location, combines it with another value,...

Align getAlign() const

Return the alignment of the memory that is being allocated by the instruction.

AtomicRMWInst * cloneImpl() const

bool isVolatile() const

Return true if this is a RMW on a volatile memory location.

BinOp

This enumeration lists the possible modifications atomicrmw can make.

@ USubCond

Subtract only if no unsigned overflow.

@ Min

*p = old <signed v ? old : v

@ USubSat

*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.

@ UIncWrap

Increment one up to a maximum value.

@ Max

*p = old >signed v ? old : v

@ UMin

*p = old <unsigned v ? old : v

@ FMin

*p = minnum(old, v) minnum matches the behavior of llvm.minnum.

@ UMax

*p = old >unsigned v ? old : v

@ FMax

*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.

@ UDecWrap

Decrement one until a minimum value or zero.

void setSyncScopeID(SyncScope::ID SSID)

Sets the synchronization scope ID of this rmw instruction.

void setOrdering(AtomicOrdering Ordering)

Sets the ordering constraint of this rmw instruction.

void setOperation(BinOp Operation)

BinOp getOperation() const

AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, AtomicOrdering Ordering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)

SyncScope::ID getSyncScopeID() const

Returns the synchronization scope ID of this rmw instruction.

void setAlignment(Align Align)

static StringRef getOperationName(BinOp Op)

AtomicOrdering getOrdering() const

Returns the ordering constraint of this rmw instruction.
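A comparable sketch for atomicrmw, again built through IRBuilder; the helper name and the chosen ordering are illustrative, and the MaybeAlign parameter of CreateAtomicRMW is assumed as in recent LLVM.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include <cassert>
using namespace llvm;

// Emit "atomicrmw add ptr %Ptr, %Val seq_cst" and sanity-check its fields.
static AtomicRMWInst *emitAtomicAdd(IRBuilder<> &B, Value *Ptr, Value *Val) {
  AtomicRMWInst *RMW =
      B.CreateAtomicRMW(AtomicRMWInst::Add, Ptr, Val, MaybeAlign(),
                        AtomicOrdering::SequentiallyConsistent);
  assert(RMW->getOperation() == AtomicRMWInst::Add && !RMW->isVolatile());
  return RMW;
}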

bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const

Return true if the specified attribute is set for at least one parameter or for the return value.

FPClassTest getRetNoFPClass() const

Get the disallowed floating-point classes of the return value.

bool hasParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const

Return true if the attribute exists for the given argument.

FPClassTest getParamNoFPClass(unsigned ArgNo) const

Get the disallowed floating-point classes of the argument value.

MemoryEffects getMemoryEffects() const

Returns memory effects of the function.

const ConstantRange & getRange() const

Returns the value of the range attribute.

AttrKind

This enumeration lists the attributes that can be associated with parameters, function results,...

static Attribute getWithMemoryEffects(LLVMContext &Context, MemoryEffects ME)

bool isValid() const

Return true if the attribute is any kind of attribute.

LLVM Basic Block Representation.

bool isEntryBlock() const

Return true if this is the entry block of the containing function.

const Function * getParent() const

Return the enclosing method, or null if none.

const DataLayout & getDataLayout() const

Get the data layout of the module this basic block belongs to.

static BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...

BinaryOps getOpcode() const

bool swapOperands()

Exchange the two operands to this instruction.

static BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)

static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)

Construct a binary instruction, given the opcode and the two operands.

BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty, const Twine &Name, InsertPosition InsertBefore)

static BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)

BinaryOperator * cloneImpl() const

This class represents a no-op cast from one type to another.

BitCastInst * cloneImpl() const

Clone an identical BitCastInst.

BitCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

Constructor with insert-before-instruction semantics.

Conditional or Unconditional Branch instruction.

void swapSuccessors()

Swap the successors of this branch instruction.

BranchInst * cloneImpl() const

bool isConditional() const

Value * getCondition() const

Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...

FPClassTest getParamNoFPClass(unsigned i) const

Extract a test mask for disallowed floating-point value classes for the parameter.

bool isInlineAsm() const

Check if this call is an inline asm statement.

BundleOpInfo & getBundleOpInfoForOperand(unsigned OpIdx)

Return the BundleOpInfo for the operand at index OpIdx.

Attribute getRetAttr(Attribute::AttrKind Kind) const

Return the attribute for the given attribute kind for the return value.

void setCallingConv(CallingConv::ID CC)

FPClassTest getRetNoFPClass() const

Extract a test mask for disallowed floating-point value classes for the return value.

bundle_op_iterator bundle_op_info_begin()

Return the start of the list of BundleOpInfo instances associated with this OperandBundleUser.

MemoryEffects getMemoryEffects() const

void addFnAttr(Attribute::AttrKind Kind)

Adds the attribute to the function.

bool doesNotAccessMemory() const

Determine if the call does not access memory.

void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const

Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.

void setOnlyAccessesArgMemory()

OperandBundleUse getOperandBundleAt(unsigned Index) const

Return the operand bundle at a specific index.

void setOnlyAccessesInaccessibleMemOrArgMem()

std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const

Return an operand bundle by name, if present.

Function * getCalledFunction() const

Returns the function called, or null if this is an indirect function invocation or the function signa...

void setDoesNotAccessMemory()

bool hasRetAttr(Attribute::AttrKind Kind) const

Determine whether the return value has the given attribute.

bool onlyAccessesInaccessibleMemory() const

Determine if the function may only access memory that is inaccessible from the IR.

unsigned getNumOperandBundles() const

Return the number of operand bundles associated with this User.

CallingConv::ID getCallingConv() const

bundle_op_iterator bundle_op_info_end()

Return the end of the list of BundleOpInfo instances associated with this OperandBundleUser.

unsigned getNumSubclassExtraOperandsDynamic() const

Get the number of extra operands for instructions that don't have a fixed number of extra operands.

bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const

Determine whether the argument or parameter has the given attribute.

User::op_iterator arg_begin()

Return the iterator pointing to the beginning of the argument list.

bool isMustTailCall() const

Tests if this call site must be tail call optimized.

bool isIndirectCall() const

Return true if the callsite is an indirect call.

bool onlyReadsMemory() const

Determine if the call does not access or only reads memory.

iterator_range< bundle_op_iterator > bundle_op_infos()

Return the range [bundle_op_info_begin, bundle_op_info_end).

void setOnlyReadsMemory()

static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)

Create a clone of CB with operand bundle OB added.

bool onlyAccessesInaccessibleMemOrArgMem() const

Determine if the function may only access memory that is either inaccessible from the IR or pointed t...

Value * getCalledOperand() const

void setOnlyWritesMemory()

op_iterator populateBundleOperandInfos(ArrayRef< OperandBundleDef > Bundles, const unsigned BeginIndex)

Populate the BundleOpInfo instances and the Use& vector from Bundles.

AttributeList Attrs

parameter attributes for callable

bool hasOperandBundlesOtherThan(ArrayRef< uint32_t > IDs) const

Return true if this operand bundle user contains operand bundles with tags other than those specified...

std::optional< ConstantRange > getRange() const

If this return value has a range attribute, return the value range of the argument.

bool isReturnNonNull() const

Return true if the return value is known to be not null.

Value * getArgOperand(unsigned i) const

uint64_t getRetDereferenceableBytes() const

Extract the number of dereferenceable bytes for a call or parameter (0=unknown).

User::op_iterator arg_end()

Return the iterator pointing to the end of the argument list.

FunctionType * getFunctionType() const

Intrinsic::ID getIntrinsicID() const

Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...

static unsigned CountBundleInputs(ArrayRef< OperandBundleDef > Bundles)

Return the total number of values used in Bundles.

Value * getArgOperandWithAttribute(Attribute::AttrKind Kind) const

If one of the arguments has the specified attribute, returns its operand value.

void setOnlyAccessesInaccessibleMemory()

static CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)

Create a clone of CB with a different set of operand bundles and insert it before InsertPt.

bool onlyWritesMemory() const

Determine if the call does not access or only writes memory.

bool hasClobberingOperandBundles() const

Return true if this operand bundle user has operand bundles that may write to the heap.

void setCalledOperand(Value *V)

static CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, InsertPosition InsertPt=nullptr)

Create a clone of CB with operand bundle ID removed.

bool hasReadingOperandBundles() const

Return true if this operand bundle user has operand bundles that may read from the heap.

bool onlyAccessesArgMemory() const

Determine if the call can access memory only using pointers based on its arguments.

unsigned arg_size() const

AttributeList getAttributes() const

Return the attributes for this call.

void setMemoryEffects(MemoryEffects ME)

bool hasOperandBundles() const

Return true if this User has any operand bundles.

bool isTailCall() const

Tests if this call site is marked as a tail call.

Function * getCaller()

Helper to get the caller (the parent function).
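The memory-effects accessors above compose by intersection. A hedged sketch of clamping a call site to argument-memory-only effects, assuming MemoryEffects from llvm/Support/ModRef.h and its operator&; restrictToArgMemory is an illustrative helper, not an API of this file.

#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/ModRef.h"
#include <cassert>
using namespace llvm;

// Intersect the call's current effects with "argument memory only", leaving
// calls that already claim to touch no memory alone.
static void restrictToArgMemory(CallBase &CB) {
  if (CB.doesNotAccessMemory())
    return;
  CB.setMemoryEffects(CB.getMemoryEffects() & MemoryEffects::argMemOnly());
  assert(CB.onlyAccessesArgMemory());
}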

CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...

SmallVector< BasicBlock *, 16 > getIndirectDests() const

void setDefaultDest(BasicBlock *B)

void setIndirectDest(unsigned i, BasicBlock *B)

BasicBlock * getDefaultDest() const

static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)

CallBrInst * cloneImpl() const

This class represents a function call, abstracting a target machine's calling convention.

void updateProfWeight(uint64_t S, uint64_t T)

Updates profile metadata by scaling it by S / T.

TailCallKind getTailCallKind() const

CallInst * cloneImpl() const

static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

This is the base class for all instructions that perform data casts.

static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)

Returns the opcode necessary to cast Val into Ty using usual casting rules.

static CastInst * CreatePointerBitCastOrAddrSpaceCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Create a BitCast or an AddrSpaceCast cast instruction.

Instruction::CastOps getOpcode() const

Return the opcode of this CastInst.

static CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Create a ZExt, BitCast, or Trunc for int -> int casts.

static CastInst * CreateFPCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Create an FPExt, BitCast, or FPTrunc for fp -> fp casts.

static unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, Type *DstIntPtrTy)

Determine how a pair of casts can be eliminated, if they can be at all.

static bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)

Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.

static bool isBitCastable(Type *SrcTy, Type *DestTy)

Check whether a bitcast between these types is valid.

static CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Create a Trunc or BitCast cast instruction.

static CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.

static CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.

static bool isNoopCast(Instruction::CastOps Opcode, Type *SrcTy, Type *DstTy, const DataLayout &DL)

A no-op cast is one that can be effected without changing any bits.

static CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Create a ZExt or BitCast cast instruction.

static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...

bool isIntegerCast() const

There are several places where we need to know if a cast instruction only deals with integer source a...

static CastInst * CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Create a SExt or BitCast cast instruction.

static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)

This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
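A sketch of the usual three-step pattern with the helpers above: ask getCastOpcode which cast is needed, confirm it with castIsValid, then Create the instruction. castToI64, the assumption that V is an integer value, and the use of the InsertPosition handle from recent LLVM are illustrative.

#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include <cassert>
using namespace llvm;

// Widen (or truncate) an integer value V to I64Ty using signed semantics.
static Value *castToI64(Value *V, Type *I64Ty, InsertPosition InsertBefore) {
  Instruction::CastOps Op = CastInst::getCastOpcode(
      V, /*SrcIsSigned=*/true, I64Ty, /*DstIsSigned=*/true);
  assert(CastInst::castIsValid(Op, V->getType(), I64Ty));
  return CastInst::Create(Op, V, I64Ty, "widened", InsertBefore);
}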

CatchReturnInst * cloneImpl() const

void setUnwindDest(BasicBlock *UnwindDest)

void addHandler(BasicBlock *Dest)

Add an entry to the switch instruction... Note: This action invalidates handler_end().

CatchSwitchInst * cloneImpl() const

Value * getParentPad() const

void setParentPad(Value *ParentPad)

BasicBlock * getUnwindDest() const

void removeHandler(handler_iterator HI)

bool hasUnwindDest() const

CleanupReturnInst * cloneImpl() const

This class is the base class for the comparison instructions.

Predicate getStrictPredicate() const

For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.

bool isEquality() const

Determine if this is an equals/not equals predicate.

void setPredicate(Predicate P)

Set the predicate for this instruction to the specified value.

bool isFalseWhenEqual() const

This is just a convenience.

Predicate

This enumeration lists the possible predicates for CmpInst subclasses.

@ FCMP_OEQ

0 0 0 1 True if ordered and equal

@ FCMP_TRUE

1 1 1 1 Always true (always folded)

@ ICMP_SLT

signed less than

@ ICMP_SLE

signed less or equal

@ FCMP_OLT

0 1 0 0 True if ordered and less than

@ FCMP_ULE

1 1 0 1 True if unordered, less than, or equal

@ FCMP_OGT

0 0 1 0 True if ordered and greater than

@ FCMP_OGE

0 0 1 1 True if ordered and greater than or equal

@ ICMP_UGE

unsigned greater or equal

@ ICMP_UGT

unsigned greater than

@ ICMP_SGT

signed greater than

@ FCMP_ULT

1 1 0 0 True if unordered or less than

@ FCMP_ONE

0 1 1 0 True if ordered and operands are unequal

@ FCMP_UEQ

1 0 0 1 True if unordered or equal

@ ICMP_ULT

unsigned less than

@ FCMP_UGT

1 0 1 0 True if unordered or greater than

@ FCMP_OLE

0 1 0 1 True if ordered and less than or equal

@ FCMP_ORD

0 1 1 1 True if ordered (no nans)

@ ICMP_SGE

signed greater or equal

@ FCMP_UNE

1 1 1 0 True if unordered or not equal

@ ICMP_ULE

unsigned less or equal

@ FCMP_UGE

1 0 1 1 True if unordered, greater than, or equal

@ FCMP_FALSE

0 0 0 0 Always false (always folded)

@ FCMP_UNO

1 0 0 0 True if unordered: isnan(X) | isnan(Y)

bool isEquivalence(bool Invert=false) const

Determine if one operand of this compare can always be replaced by the other operand,...

Predicate getSwappedPredicate() const

For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.

bool isTrueWhenEqual() const

This is just a convenience.

static CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Construct a compare instruction, given the opcode, the predicate and the two operands.

Predicate getNonStrictPredicate() const

For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.

static CmpInst * CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Instruction *FlagsSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)

Construct a compare instruction, given the opcode, the predicate, the two operands and the instructio...

bool isNonStrictPredicate() const

bool isFPPredicate() const

void swapOperands()

This is just a convenience that dispatches to the subclasses.

Predicate getInversePredicate() const

For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...

static StringRef getPredicateName(Predicate P)

Predicate getPredicate() const

Return the predicate for this instruction.

bool isStrictPredicate() const

static bool isUnordered(Predicate predicate)

Determine if the predicate is an unordered operation.

Predicate getFlippedStrictnessPredicate() const

For predicate of kind "is X or equal to 0" returns the predicate "is X".

bool isIntPredicate() const

static bool isOrdered(Predicate predicate)

Determine if the predicate is an ordered operation.

CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred, Value *LHS, Value *RHS, const Twine &Name="", InsertPosition InsertBefore=nullptr, Instruction *FlagsSource=nullptr)

bool isCommutative() const

This is just a convenience that dispatches to the subclasses.

bool isRelational() const

Return true if the predicate is relational (not EQ or NE).
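The predicate helpers above obey simple algebraic identities; a small self-contained check using only the static forms of the accessors.

#include "llvm/IR/InstrTypes.h"
#include <cassert>
using namespace llvm;

// Inverting a predicate negates the result; swapping it exchanges operands.
static void predicateIdentities() {
  CmpInst::Predicate P = CmpInst::ICMP_SGT;
  assert(CmpInst::getInversePredicate(P) == CmpInst::ICMP_SLE); // !(a > b) <=> a <= b
  assert(CmpInst::getSwappedPredicate(P) == CmpInst::ICMP_SLT); //  a > b  <=>  b < a
  assert(CmpInst::isIntPredicate(P) && CmpInst::isSigned(P));
}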

An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...

static std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)

Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...

CmpPredicate()

Default constructor.

static CmpPredicate get(const CmpInst *Cmp)

Do a ICmpInst::getCmpPredicate() or CmpInst::getPredicate(), as appropriate.

CmpInst::Predicate getPreferredSignedPredicate() const

Attempts to return a signed CmpInst::Predicate from the CmpPredicate.

bool hasSameSign() const

Query samesign information, for optimizations.

static CmpPredicate getSwapped(CmpPredicate P)

Get the swapped predicate of a CmpPredicate.

ConstantFP - Floating Point Values [float, double].

const APFloat & getValueAPF() const

This is the shared class of boolean and integer constants.

static Constant * get(ArrayRef< Constant * > V)

This is an important base class in LLVM.

static Constant * getAllOnesValue(Type *Ty)

static Constant * getNullValue(Type *Ty)

Constructor to create a '0' constant of arbitrary type.

This class represents an Operation in the Expression.

A parsed version of the target data layout string, and methods for querying it.

static constexpr ElementCount getFixed(ScalarTy MinVal)

This instruction compares its operands according to the predicate given to the constructor.

static bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)

Return result of LHS Pred RHS comparison.

FCmpInst * cloneImpl() const

Clone an identical FCmpInst.

This class represents an extension of floating point types.

FPExtInst * cloneImpl() const

Clone an identical FPExtInst.

FPExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

Constructor with insert-before-instruction semantics.

float getFPAccuracy() const

Get the maximum error permitted by this operation in ULPs.

This class represents a cast from floating point to signed integer.

FPToSIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

Constructor with insert-before-instruction semantics.

FPToSIInst * cloneImpl() const

Clone an identical FPToSIInst.

This class represents a cast from floating point to unsigned integer.

FPToUIInst * cloneImpl() const

Clone an identical FPToUIInst.

FPToUIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

Constructor with insert-before-instruction semantics.

This class represents a truncation of floating point types.

FPTruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

Constructor with insert-before-instruction semantics.

FPTruncInst * cloneImpl() const

Clone an identical FPTruncInst.

An instruction for ordering other memory operations.

FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System, InsertPosition InsertBefore=nullptr)

SyncScope::ID getSyncScopeID() const

Returns the synchronization scope ID of this fence instruction.

void setSyncScopeID(SyncScope::ID SSID)

Sets the synchronization scope ID of this fence instruction.

FenceInst * cloneImpl() const

void setOrdering(AtomicOrdering Ordering)

Sets the ordering constraint of this fence instruction.

AtomicOrdering getOrdering() const

Returns the ordering constraint of this fence instruction.

Class to represent fixed width SIMD vectors.

unsigned getNumElements() const

This class represents a freeze function that returns random concrete value if an operand is either a ...

FreezeInst(Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

FreezeInst * cloneImpl() const

Clone an identical FreezeInst.

void setParentPad(Value *ParentPad)

Value * getParentPad() const

Convenience accessors.

FuncletPadInst * cloneImpl() const

Class to represent function types.

unsigned getNumParams() const

Return the number of fixed parameters this function type requires.

Type * getParamType(unsigned i) const

Parameter type accessors.

Represents flags for the getelementptr instruction/expression.

static GEPNoWrapFlags inBounds()

GEPNoWrapFlags withoutInBounds() const

an instruction for type-safe pointer arithmetic to access elements of arrays and structs

bool isInBounds() const

Determine whether the GEP has the inbounds flag.

bool hasNoUnsignedSignedWrap() const

Determine whether the GEP has the nusw flag.

static Type * getTypeAtIndex(Type *Ty, Value *Idx)

Return the type of the element at the given index of an indexable type.

bool hasAllZeroIndices() const

Return true if all of the indices of this GEP are zeros.

bool hasNoUnsignedWrap() const

Determine whether the GEP has the nuw flag.

bool hasAllConstantIndices() const

Return true if all of the indices of this GEP are constant integers.

void setIsInBounds(bool b=true)

Set or clear the inbounds flag on this GEP instruction.

static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)

Returns the result type of a getelementptr with the given source element type and indexes.

bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const

Accumulate the constant address offset of this GEP if possible.

GetElementPtrInst * cloneImpl() const

bool collectOffset(const DataLayout &DL, unsigned BitWidth, SmallMapVector< Value *, APInt, 4 > &VariableOffsets, APInt &ConstantOffset) const

void setNoWrapFlags(GEPNoWrapFlags NW)

Set nowrap flags for GEP instruction.

GEPNoWrapFlags getNoWrapFlags() const

Get the nowrap flags for the GEP instruction.
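A sketch of folding an all-constant GEP into a byte offset with accumulateConstantOffset; byteOffsetOf is an illustrative helper, and the offset width is taken from the DataLayout's index size as the API requires.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include <optional>
using namespace llvm;

// Returns the constant byte offset of GEP from its base pointer, or
// std::nullopt if any index is not a constant integer.
static std::optional<APInt> byteOffsetOf(const GetElementPtrInst &GEP,
                                         const DataLayout &DL) {
  APInt Offset(DL.getIndexTypeSizeInBits(GEP.getType()), 0);
  if (GEP.accumulateConstantOffset(DL, Offset))
    return Offset;
  return std::nullopt;
}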

This instruction compares its operands according to the predicate given to the constructor.

static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)

Return result of LHS Pred RHS comparison.

ICmpInst * cloneImpl() const

Clone an identical ICmpInst.

CmpPredicate getInverseCmpPredicate() const

Predicate getFlippedSignednessPredicate() const

For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.

Predicate getSignedPredicate() const

For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.

bool isEquality() const

Return true if this predicate is either EQ or NE.

static std::optional< bool > isImpliedByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)

Determine if Pred1 implies Pred2 is true, false, or if nothing can be inferred about the implication,...

Predicate getUnsignedPredicate() const

For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
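ICmpInst::compare evaluates a predicate on constant APInt operands; a tiny example showing how the same bit pattern orders differently under unsigned and signed predicates.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/Instructions.h"
#include <cassert>
using namespace llvm;

// 0x80 in 8 bits is 128 when read unsigned but -128 when read signed.
static void compareConstants() {
  APInt A(8, 0x80), B(8, 0x01);
  assert(ICmpInst::compare(A, B, ICmpInst::ICMP_UGT));
  assert(ICmpInst::compare(A, B, ICmpInst::ICMP_SLT));
}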

Indirect Branch Instruction.

void addDestination(BasicBlock *Dest)

Add a destination.

void removeDestination(unsigned i)

This method removes the specified successor from the indirectbr instruction.

IndirectBrInst * cloneImpl() const

This instruction inserts a single (scalar) element into a VectorType value.

InsertElementInst * cloneImpl() const

static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

static bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)

Return true if an insertelement instruction can be formed with the specified operands.

BasicBlock * getBasicBlock()

This instruction inserts a struct field of array element value into an aggregate value.

InsertValueInst * cloneImpl() const

BitfieldElement::Type getSubclassData() const

bool hasNoNaNs() const LLVM_READONLY

Determine whether the no-NaNs flag is set.

void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)

Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...

const DebugLoc & getDebugLoc() const

Return the debug location for this node as a DebugLoc.

bool isCommutative() const LLVM_READONLY

Return true if the instruction is commutative:

InstListType::iterator eraseFromParent()

This method unlinks 'this' from the containing basic block and deletes it.

void swapProfMetadata()

If the instruction has "branch_weights" MD_prof metadata and the MDNode has three operands (including...

unsigned getOpcode() const

Returns a member of one of the enums like Instruction::Add.

This class represents a cast from an integer to a pointer.

IntToPtrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

Constructor with insert-before-instruction semantics.

IntToPtrInst * cloneImpl() const

Clone an identical IntToPtrInst.

BasicBlock * getUnwindDest() const

void setNormalDest(BasicBlock *B)

InvokeInst * cloneImpl() const

LandingPadInst * getLandingPadInst() const

Get the landingpad instruction from the landing pad block (the unwind destination).

void setUnwindDest(BasicBlock *B)

void updateProfWeight(uint64_t S, uint64_t T)

Updates profile metadata by scaling it by S / T.

static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)

This is an important class for using LLVM in a threaded context.

LLVMContextImpl *const pImpl

The landingpad instruction holds all of the information necessary to generate correct exception handl...

bool isCleanup() const

Return 'true' if this landingpad instruction is a cleanup.

LandingPadInst * cloneImpl() const

static LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...

void addClause(Constant *ClauseVal)

Add a catch or filter clause to the landing pad.

void setCleanup(bool V)

Indicate that this landingpad instruction is a cleanup.

An instruction for reading from memory.

void setAlignment(Align Align)

bool isVolatile() const

Return true if this is a load from a volatile memory location.

void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)

Sets the ordering constraint and the synchronization scope ID of this load instruction.

LoadInst * cloneImpl() const

AtomicOrdering getOrdering() const

Returns the ordering constraint of this load instruction.

void setVolatile(bool V)

Specify whether this is a volatile load or not.

SyncScope::ID getSyncScopeID() const

Returns the synchronization scope ID of this load instruction.

LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, InsertPosition InsertBefore)

Align getAlign() const

Return the alignment of the access that is being performed.

MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight, bool IsExpected=false)

Return metadata containing two branch weights.

const MDOperand & getOperand(unsigned I) const

static MemoryEffectsBase readOnly()

Create MemoryEffectsBase that can read any memory.

bool onlyWritesMemory() const

Whether this function only (at most) writes memory.

bool doesNotAccessMemory() const

Whether this function accesses no memory.

static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)

Create MemoryEffectsBase that can only access argument memory.

static MemoryEffectsBase inaccessibleMemOnly(ModRefInfo MR=ModRefInfo::ModRef)

Create MemoryEffectsBase that can only access inaccessible memory.

bool onlyAccessesInaccessibleMem() const

Whether this function only (at most) accesses inaccessible memory.

bool onlyAccessesArgPointees() const

Whether this function only (at most) accesses argument memory.

bool onlyReadsMemory() const

Whether this function only (at most) reads memory.

static MemoryEffectsBase writeOnly()

Create MemoryEffectsBase that can write any memory.

static MemoryEffectsBase inaccessibleOrArgMemOnly(ModRefInfo MR=ModRefInfo::ModRef)

Create MemoryEffectsBase that can only access inaccessible or argument memory.

static MemoryEffectsBase none()

Create MemoryEffectsBase that cannot read or write any memory.

bool onlyAccessesInaccessibleOrArgMem() const

Whether this function only (at most) accesses argument and inaccessible memory.

A container for an operand bundle being viewed as a set of values rather than a set of uses.

iterator_range< const_block_iterator > blocks() const

void allocHungoffUses(unsigned N)

const_block_iterator block_begin() const

void removeIncomingValueIf(function_ref< bool(unsigned)> Predicate, bool DeletePHIIfEmpty=true)

Remove all incoming values for which the predicate returns true.

Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)

Remove an incoming value.

bool hasConstantOrUndefValue() const

Whether the specified PHI node always merges together the same value, assuming undefs are equal to a ...

void copyIncomingBlocks(iterator_range< const_block_iterator > BBRange, uint32_t ToIdx=0)

Copies the basic blocks from BBRange to the incoming basic block list of this PHINode,...

const_block_iterator block_end() const

Value * getIncomingValue(unsigned i) const

Return incoming value number x.

Value * hasConstantValue() const

If the specified PHI node always merges together the same value, return the value,...

PHINode * cloneImpl() const

unsigned getNumIncomingValues() const

Return the number of incoming edges.
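A sketch of building a two-way phi and using hasConstantValue to drop it when it is redundant; mergeValues is illustrative and assumes the builder is positioned at the top of the join block, since phis must precede all other instructions.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Merge a value arriving from two predecessors; if both incoming values are
// identical the phi is redundant and is removed on the spot.
static Value *mergeValues(IRBuilder<> &B, Value *FromThen, BasicBlock *Then,
                          Value *FromElse, BasicBlock *Else) {
  PHINode *PN =
      B.CreatePHI(FromThen->getType(), /*NumReservedValues=*/2, "merge");
  PN->addIncoming(FromThen, Then);
  PN->addIncoming(FromElse, Else);
  if (Value *V = PN->hasConstantValue()) {
    PN->replaceAllUsesWith(V);
    PN->eraseFromParent();
    return V;
  }
  return PN;
}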

Class to represent pointers.

unsigned getAddressSpace() const

Return the address space of the Pointer type.

static PoisonValue * get(Type *T)

Static factory methods - Return an 'poison' object of the specified type.

This class represents a cast from a pointer to an integer.

PtrToIntInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

Constructor with insert-before-instruction semantics.

PtrToIntInst * cloneImpl() const

Clone an identical PtrToIntInst.

Resume the propagation of an exception.

ResumeInst * cloneImpl() const

Return a value (possibly void), from a function.

ReturnInst * cloneImpl() const

This class represents a sign extension of integer types.

SExtInst * cloneImpl() const

Clone an identical SExtInst.

SExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

Constructor with insert-before-instruction semantics.

This class represents a cast from signed integer to floating point.

SIToFPInst * cloneImpl() const

Clone an identical SIToFPInst.

SIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

Constructor with insert-before-instruction semantics.

Class to represent scalable SIMD vectors.

This class represents the LLVM 'select' instruction.

static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)

SelectInst * cloneImpl() const

static const char * areInvalidOperands(Value *Cond, Value *True, Value *False)

Return a string if the specified operands are invalid for a select operation, otherwise return null.
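areInvalidOperands returns a diagnostic string rather than asserting, so it can gate construction; a sketch with buildSelect as an illustrative wrapper.

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Build "select i1 Cond, TrueV, FalseV" only when the operands are legal;
// otherwise return nullptr (Err names the problem, e.g. mismatched types).
static SelectInst *buildSelect(Value *Cond, Value *TrueV, Value *FalseV,
                               InsertPosition InsertBefore) {
  if (const char *Err = SelectInst::areInvalidOperands(Cond, TrueV, FalseV)) {
    (void)Err;
    return nullptr;
  }
  return SelectInst::Create(Cond, TrueV, FalseV, "sel", InsertBefore);
}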

This instruction constructs a fixed permutation of two input vectors.

static bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)

Return true if this shuffle mask chooses all elements with the same value as the first element of exa...

ArrayRef< int > getShuffleMask() const

static bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)

Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...

int getMaskValue(unsigned Elt) const

Return the shuffle mask value of this instruction for the given element index.

ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

static bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)

Return true if a shufflevector instruction can be formed with the specified operands.

static bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)

Return true if this shuffle mask chooses elements from its source vectors without lane crossings.

static bool isBitRotateMask(ArrayRef< int > Mask, unsigned EltSizeInBits, unsigned MinSubElts, unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt)

Checks if the shuffle is a bit rotation of the first operand across multiple subelements,...

VectorType * getType() const

Overload to return most specific vector type.

bool isIdentityWithExtract() const

Return true if this shuffle extracts the first N elements of exactly one source vector.

static bool isOneUseSingleSourceMask(ArrayRef< int > Mask, int VF)

Return true if this shuffle mask represents "clustered" mask of size VF, i.e.

bool isIdentityWithPadding() const

Return true if this shuffle lengthens exactly one source vector with undefs in the high elements.

static bool isSingleSourceMask(ArrayRef< int > Mask, int NumSrcElts)

Return true if this shuffle mask chooses elements from exactly one source vector.

bool isConcat() const

Return true if this shuffle concatenates its 2 source vectors.

static bool isDeInterleaveMaskOfFactor(ArrayRef< int > Mask, unsigned Factor, unsigned &Index)

Check if the mask is a DE-interleave mask of the given factor Factor like: <Index,...

ShuffleVectorInst * cloneImpl() const

static bool isIdentityMask(ArrayRef< int > Mask, int NumSrcElts)

Return true if this shuffle mask chooses elements from exactly one source vector without lane crossin...

static bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)

Return true if this shuffle mask is an extract subvector mask.

void setShuffleMask(ArrayRef< int > Mask)

bool isInterleave(unsigned Factor)

Return if this shuffle interleaves its two input vectors together.

static bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)

Return true if this shuffle mask swaps the order of elements from exactly one source vector.

static bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)

Return true if this shuffle mask is a transpose mask.

void commute()

Swap the operands and adjust the mask to preserve the semantics of the instruction.

static bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)

Return true if this shuffle mask is an insert subvector mask.

static Constant * convertShuffleMaskForBitcode(ArrayRef< int > Mask, Type *ResultTy)

static bool isReplicationMask(ArrayRef< int > Mask, int &ReplicationFactor, int &VF)

Return true if this shuffle mask replicates each of the VF elements in a vector ReplicationFactor tim...

static bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts, SmallVectorImpl< unsigned > &StartIndexes)

Return true if the mask interleaves one or more input vectors together.
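The static mask classifiers take a plain ArrayRef<int> plus the source element count, so they can be exercised without building any IR; a small check using the signatures documented above.

#include "llvm/IR/Instructions.h"
#include <cassert>
using namespace llvm;

// An in-order mask over one source is an identity; its reversal is a
// single-source (but not identity) reverse mask.
static void classifyMasks() {
  int Identity[] = {0, 1, 2, 3};
  int Reverse[] = {3, 2, 1, 0};
  assert(ShuffleVectorInst::isIdentityMask(Identity, /*NumSrcElts=*/4));
  assert(!ShuffleVectorInst::isIdentityMask(Reverse, /*NumSrcElts=*/4));
  assert(ShuffleVectorInst::isReverseMask(Reverse, /*NumSrcElts=*/4));
  assert(ShuffleVectorInst::isSingleSourceMask(Reverse, /*NumSrcElts=*/4));
}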

This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...

Implements a dense probed hash-table based set with some number of buckets stored inline.

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

void assign(size_type NumElts, ValueParamT Elt)

reference emplace_back(ArgTypes &&... Args)

void append(ItTy in_start, ItTy in_end)

Add the specified range to the end of the SmallVector.

void push_back(const T &Elt)

This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.

An instruction for storing to memory.

AtomicOrdering getOrdering() const

Returns the ordering constraint of this store instruction.

void setVolatile(bool V)

Specify whether this is a volatile store or not.

void setAlignment(Align Align)

StoreInst * cloneImpl() const

StoreInst(Value *Val, Value *Ptr, InsertPosition InsertBefore)

SyncScope::ID getSyncScopeID() const

Returns the synchronization scope ID of this store instruction.

bool isVolatile() const

Return true if this is a store to a volatile memory location.

void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)

Sets the ordering constraint and the synchronization scope ID of this store instruction.

StringRef - Represent a constant reference to a string, i.e.

Class to represent struct types.

void setSuccessorWeight(unsigned idx, CaseWeightOpt W)

Instruction::InstListType::iterator eraseFromParent()

Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch t...

void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)

Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...

CaseWeightOpt getSuccessorWeight(unsigned idx)

MDNode * buildProfBranchWeightsMD()

std::optional< uint32_t > CaseWeightOpt

SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)

Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.

void setValue(ConstantInt *V) const

Sets the new value for current case.

void setSuccessor(BasicBlock *S) const

Sets the new successor for current case.

SwitchInst * cloneImpl() const

void addCase(ConstantInt *OnVal, BasicBlock *Dest)

Add an entry to the switch instruction.

CaseIteratorImpl< CaseHandle > CaseIt

unsigned getNumCases() const

Return the number of 'cases' in this switch instruction, excluding the default case.

CaseIt removeCase(CaseIt I)

This method removes the specified case and its successor from the switch instruction.
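A sketch of emitting a small switch through IRBuilder and the addCase/getNumCases interface above; emitDispatch is an illustrative helper, and Key is assumed to be an i32 value so the ConstantInt case labels match its type.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include <cassert>
using namespace llvm;

// Dispatch on an i32 key: case 0 and case 1 get explicit successors, every
// other value falls through to Default.
static SwitchInst *emitDispatch(IRBuilder<> &B, Value *Key, BasicBlock *Default,
                                BasicBlock *Case0, BasicBlock *Case1) {
  SwitchInst *SI = B.CreateSwitch(Key, Default, /*NumCases=*/2);
  SI->addCase(B.getInt32(0), Case0);
  SI->addCase(B.getInt32(1), Case1);
  assert(SI->getNumCases() == 2); // the default successor is not counted
  return SI;
}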

This class represents a truncation of integer types.

TruncInst * cloneImpl() const

Clone an identical TruncInst.

TruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

Constructor with insert-before-instruction semantics.

Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...

static constexpr TypeSize getFixed(ScalarTy ExactSize)

static constexpr TypeSize get(ScalarTy Quantity, bool Scalable)

The instances of the Type class are immutable: once they are created, they are never changed.

bool isVectorTy() const

True if this is an instance of VectorType.

bool isIntOrIntVectorTy() const

Return true if this is an integer type or a vector of integer types.

bool isPointerTy() const

True if this is an instance of PointerType.

static IntegerType * getInt1Ty(LLVMContext &C)

unsigned getPointerAddressSpace() const

Get the address space of this pointer or pointer vector type.

unsigned getScalarSizeInBits() const LLVM_READONLY

If this is a vector type, return the getPrimitiveSizeInBits value for the element type.

bool isFirstClassType() const

Return true if the type is "first class", meaning it is a valid type for a Value.

bool isAggregateType() const

Return true if the type is an aggregate type.

LLVMContext & getContext() const

Return the LLVMContext in which this type was uniqued.

bool isFloatingPointTy() const

Return true if this is one of the floating-point types.

bool isPtrOrPtrVectorTy() const

Return true if this is a pointer type or a vector of pointer types.

static IntegerType * getInt32Ty(LLVMContext &C)

bool isIntegerTy() const

True if this is an instance of IntegerType.

bool isTokenTy() const

Return true if this is 'token'.

bool isFPOrFPVectorTy() const

Return true if this is a FP type or a vector of FP.

TypeSize getPrimitiveSizeInBits() const LLVM_READONLY

Return the basic size of this type if it is a primitive type.

bool isVoidTy() const

Return true if this is 'void'.

Type * getScalarType() const

If this is a vector type, return the element type, otherwise return 'this'.

This class represents a cast from unsigned integer to floating point.

UIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

Constructor with insert-before-instruction semantics.

UIToFPInst * cloneImpl() const

Clone an identical UIToFPInst.

static UnaryOperator * Create(UnaryOps Op, Value *S, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)

Construct a unary instruction, given the opcode and an operand.

UnaryOperator(UnaryOps iType, Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore)

UnaryOperator * cloneImpl() const

UnaryOps getOpcode() const

This function has undefined behavior.

UnreachableInst(LLVMContext &C, InsertPosition InsertBefore=nullptr)

UnreachableInst * cloneImpl() const

A Use represents the edge between a Value definition and its users.

const Use * getOperandList() const

void allocHungoffUses(unsigned N, bool IsPhi=false)

Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.

void setNumHungOffUseOperands(unsigned NumOps)

Subclasses with hung off uses need to manage the operand count themselves.

Value * getOperand(unsigned i) const

unsigned getNumOperands() const

void growHungoffUses(unsigned N, bool IsPhi=false)

Grow the number of hung off uses.

This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...

VAArgInst * cloneImpl() const

LLVM Value Representation.

Type * getType() const

All values are typed, get the type of this value.

unsigned char SubclassOptionalData

Hold subclass data that can be dropped.

void setName(const Twine &Name)

Change the name of the value.

void replaceAllUsesWith(Value *V)

Change all uses of this to point to a new Value.

LLVMContext & getContext() const

All values hold a context through their type.

StringRef getName() const

Return a constant reference to the value's name.

Base class of all SIMD vector types.

ElementCount getElementCount() const

Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...

static VectorType * get(Type *ElementType, ElementCount EC)

This static method is the primary way to construct an VectorType.

This class represents zero extension of integer types.

ZExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)

Constructor with insert-before-instruction semantics.

ZExtInst * cloneImpl() const

Clone an identical ZExtInst.

std::pair< iterator, bool > insert(const ValueT &V)

bool contains(const_arg_type_t< ValueT > V) const

Check if the set contains the given element.

constexpr ScalarTy getKnownMinValue() const

Returns the minimum value this quantity can represent.

An efficient, type-erasing, non-owning reference to a callable.

const ParentTy * getParent() const

base_list_type::iterator iterator

This class implements an extremely fast bulk output stream that can only output to a stream.

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.


@ C

The default llvm calling convention, compatible with C.

bool match(Val *V, const Pattern &P)

cstfp_pred_ty< is_non_zero_not_denormal_fp > m_NonZeroNotDenormalFP()

Match a floating-point non-zero that is not a denormal.

initializer< Ty > init(const Ty &Val)

@ Switch

The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...

This is an optimization pass for GlobalISel generic memory operations.

auto drop_begin(T &&RangeOrContainer, size_t N=1)

Return a range covering RangeOrContainer with the first N elements excluded.

bool all_of(R &&range, UnaryPredicate P)

Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.

auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)

Get the size of a range.

unsigned getPointerAddressSpace(const Type *T)

iterator_range< T > make_range(T x, T y)

Convenience function for iterating over sub-ranges.

MDNode * getBranchWeightMDNode(const Instruction &I)

Get the branch weights metadata node.

std::enable_if_t< std::is_unsigned_v< T >, std::optional< T > > checkedMulUnsigned(T LHS, T RHS)

Multiply two unsigned integers LHS and RHS.
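checkedMulUnsigned is the overflow-checked multiply used for size computations of this kind; a one-line sketch of combining an element count with an element size without silent wraparound (totalBytes is an illustrative name).

#include "llvm/Support/CheckedArithmetic.h"
#include <cstdint>
#include <optional>

// Returns NumElts * EltSize, or std::nullopt if the product would overflow.
static std::optional<uint64_t> totalBytes(uint64_t NumElts, uint64_t EltSize) {
  return llvm::checkedMulUnsigned(NumElts, EltSize);
}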

auto reverse(ContainerTy &&C)

constexpr bool isPowerOf2_32(uint32_t Value)

Return true if the argument is a power of two > 0.

decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)

FPClassTest

Floating-point class tests, supported by 'is_fpclass' intrinsic.

bool NullPointerIsDefined(const Function *F, unsigned AS=0)

Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...

raw_ostream & dbgs()

dbgs() - This returns a reference to a raw_ostream for debugging messages.

bool isPointerTy(const Type *T)

bool isa(const From &Val)

isa - Return true if the parameter to the template is an instance of one of the template type argu...

constexpr int PoisonMaskElem

unsigned getNumBranchWeights(const MDNode &ProfileData)

AtomicOrdering

Atomic ordering for LLVM's memory model.

auto remove_if(R &&Range, UnaryPredicate P)

Provide wrappers to std::remove_if which take ranges instead of having to pass begin/end explicitly.
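
A sketch of the range form of remove_if, still paired with erase in the usual erase-remove idiom (container and predicate are arbitrary examples):

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

static void dropNegatives(llvm::SmallVectorImpl<int> &Vals) {
  Vals.erase(llvm::remove_if(Vals, [](int V) { return V < 0; }), Vals.end());
}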

@ Or

Bitwise or logical OR of integers.

@ Mul

Product of integers.

@ Xor

Bitwise or logical XOR of integers.

@ And

Bitwise or logical AND of integers.

raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)

OutputIt copy(R &&Range, OutputIt Out)

constexpr unsigned BitWidth

bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)

Extract branch weights from MD_prof metadata.
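
A sketch combining getBranchWeightMDNode (listed above) with extractBranchWeights to read an instruction's branch weights, if any (the getWeights helper is hypothetical):

#include <cstdint>
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/ProfDataUtils.h"

static bool getWeights(const llvm::Instruction &I,
                       llvm::SmallVectorImpl<uint32_t> &Weights) {
  if (llvm::MDNode *Prof = llvm::getBranchWeightMDNode(I))
    return llvm::extractBranchWeights(Prof, Weights);
  return false; // no branch-weight metadata attached
}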

decltype(auto) cast(const From &Val)

cast - Return the argument parameter cast to the specified type.
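
A sketch of the usual isa<>/cast<> pattern on the Value hierarchy (the isVoidCall helper is illustrative):

#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"

static bool isVoidCall(const llvm::Value *V) {
  if (!llvm::isa<llvm::CallInst>(V))
    return false;
  // cast<> asserts if the dynamic type does not match; dyn_cast<> tests and casts in one step.
  const auto *CI = llvm::cast<llvm::CallInst>(V);
  return CI->getType()->isVoidTy();
}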

bool is_contained(R &&Range, const E &Element)

Returns true if Element is found in Range.

bool all_equal(std::initializer_list< T > Values)

Returns true if all Values in the initializer lists are equal or the list is empty.

auto seq(T Begin, T End)

Iterate over an integral type from Begin up to - but not including - End.
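
A sketch of seq iterating a half-open index range:

#include "llvm/ADT/Sequence.h"

static unsigned sumIndices(unsigned N) {
  unsigned Total = 0;
  for (unsigned I : llvm::seq(0u, N)) // visits 0, 1, ..., N-1; N itself is excluded
    Total += I;
  return Total;
}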

@ Default

The result values are uniform if and only if all operands are uniform.

void scaleProfData(Instruction &I, uint64_t S, uint64_t T)

Scaling the profile data attached to 'I' using the ratio of S/T.

cmpResult

IEEE-754R 5.11: Floating Point Comparison Relations.

This struct is a compact representation of a valid (non-zero power of two) alignment.

Summary of memprof metadata on allocations.

Describes an element of a Bitfield.

Used to keep track of an operand bundle.

uint32_t End

The index in the Use& vector where operands for this operand bundle end.

uint32_t Begin

The index in the Use& vector where operands for this operand bundle start.

Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...

static std::optional< bool > eq(const KnownBits &LHS, const KnownBits &RHS)

Determine if these known bits always give the same ICMP_EQ result.

static std::optional< bool > ne(const KnownBits &LHS, const KnownBits &RHS)

Determine if these known bits always give the same ICMP_NE result.

static std::optional< bool > sge(const KnownBits &LHS, const KnownBits &RHS)

Determine if these known bits always give the same ICMP_SGE result.

static std::optional< bool > ugt(const KnownBits &LHS, const KnownBits &RHS)

Determine if these known bits always give the same ICMP_UGT result.

static std::optional< bool > slt(const KnownBits &LHS, const KnownBits &RHS)

Determine if these known bits always give the same ICMP_SLT result.

static std::optional< bool > ult(const KnownBits &LHS, const KnownBits &RHS)

Determine if these known bits always give the same ICMP_ULT result.

static std::optional< bool > ule(const KnownBits &LHS, const KnownBits &RHS)

Determine if these known bits always give the same ICMP_ULE result.

static std::optional< bool > sle(const KnownBits &LHS, const KnownBits &RHS)

Determine if these known bits always give the same ICMP_SLE result.

static std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)

Determine if these known bits always give the same ICMP_SGT result.

static std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)

Determine if these known bits always give the same ICMP_UGE result.
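
A sketch of the KnownBits comparison helpers above; when both operands are fully-known constants the result is definite, otherwise the helpers may return std::nullopt:

#include <optional>
#include "llvm/ADT/APInt.h"
#include "llvm/Support/KnownBits.h"

static std::optional<bool> compareKnownConstants() {
  llvm::KnownBits A = llvm::KnownBits::makeConstant(llvm::APInt(8, 3));
  llvm::KnownBits B = llvm::KnownBits::makeConstant(llvm::APInt(8, 5));
  return llvm::KnownBits::ult(A, B); // definitely true: 3 < 5 unsigned
}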

A MapVector that performs no allocations if smaller than a certain size.

Indicates this User has operands co-allocated.

Indicates this User has operands and a descriptor co-allocated.