clang: lib/CodeGen/CGAtomic.cpp Source File

1//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-Exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the code for emitting atomic operations.
10//
11//===----------------------------------------------------------------------===//
12

21#include "llvm/ADT/DenseMap.h"

22#include "llvm/IR/DataLayout.h"

23#include "llvm/IR/Intrinsics.h"

24

25using namespace clang;

26using namespace CodeGen;

27

28namespace {

29 class AtomicInfo {

38 bool UseLibcall;

41 public:

42 AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
43 : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),

48 AtomicTy = lvalue.getType();

50 ValueTy = ATy->getValueType();

51 else

52 ValueTy = AtomicTy;

54

57 TypeInfo ValueTI = C.getTypeInfo(ValueTy);

58 ValueSizeInBits = ValueTI.Width;

59 ValueAlignInBits = ValueTI.Align;

60

61 TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);

62 AtomicSizeInBits = AtomicTI.Width;

63 AtomicAlignInBits = AtomicTI.Align;

64

65 assert(ValueSizeInBits <= AtomicSizeInBits);

66 assert(ValueAlignInBits <= AtomicAlignInBits);

67

68 AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);

69 ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);

72

73 LVal = lvalue;

75 ValueTy = lvalue.getType();

76 ValueSizeInBits = C.getTypeSize(ValueTy);

78 auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());

79 AtomicSizeInBits = C.toBits(

80 C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)

83 auto OffsetInChars =

84 (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *

86 llvm::Value *StoragePtr = CGF.Builder.CreateConstGEP1_64(

87 CGF.Int8Ty, BitFieldPtr, OffsetInChars.getQuantity());

89 StoragePtr, CGF.UnqualPtrTy, "atomic_bitfield_base");

90 BFI = OrigBFI;

94 llvm::Type *StorageTy = CGF.Builder.getIntNTy(AtomicSizeInBits);

95 LVal = LValue::MakeBitfield(

98 AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);

99 if (AtomicTy.isNull()) {

100 llvm::APInt Size(

101 32,

102 C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());

103 AtomicTy = C.getConstantArrayType(C.CharTy, Size, nullptr,

104 ArraySizeModifier::Normal,

105 0);

106 }

107 AtomicAlign = ValueAlign = lvalue.getAlignment();

110 ValueSizeInBits = C.getTypeSize(ValueTy);

111 AtomicTy = lvalue.getType();

112 AtomicSizeInBits = C.getTypeSize(AtomicTy);

113 AtomicAlign = ValueAlign = lvalue.getAlignment();

114 LVal = lvalue;

115 } else {

117 ValueTy = lvalue.getType();

118 ValueSizeInBits = C.getTypeSize(ValueTy);

120 lvalue.getType(), cast<llvm::FixedVectorType>(

122 ->getNumElements());

123 AtomicSizeInBits = C.getTypeSize(AtomicTy);

124 AtomicAlign = ValueAlign = lvalue.getAlignment();

125 LVal = lvalue;

126 }

127 UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(

128 AtomicSizeInBits, C.toBits(lvalue.getAlignment()));

129 }

130

131 QualType getAtomicType() const { return AtomicTy; }

132 QualType getValueType() const { return ValueTy; }

133 CharUnits getAtomicAlignment() const { return AtomicAlign; }

134 uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }

135 uint64_t getValueSizeInBits() const { return ValueSizeInBits; }

136 TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }

137 bool shouldUseLibcall() const { return UseLibcall; }

138 const LValue &getAtomicLValue() const { return LVal; }

139 llvm::Value *getAtomicPointer() const {

148 }

149 Address getAtomicAddress() const {

150 llvm::Type *ElTy;

157 else

159 return Address(getAtomicPointer(), ElTy, getAtomicAlignment());

160 }

161

162 Address getAtomicAddressAsAtomicIntPointer() const {

163 return castToAtomicIntPointer(getAtomicAddress());

164 }

165

166

167

168

169

170

171

172 bool hasPadding() const {

173 return (ValueSizeInBits != AtomicSizeInBits);

174 }

175

176 bool emitMemSetZeroIfNecessary() const;

177

178 llvm::Value *getAtomicSizeValue() const {

181 }

182

183

184

186

187

188

189

190 Address convertToAtomicIntPointer(Address Addr) const;

191

192

195

196 llvm::Value *getScalarRValValueOrNull(RValue RVal) const;

197

198

199 llvm::Value *convertRValueToInt(RValue RVal, bool CmpXchg = false) const;

200

201 RValue ConvertToValueOrAtomic(llvm::Value *IntVal, AggValueSlot ResultSlot,

203 bool CmpXchg = false) const;

204

205

206 void emitCopyIntoMemory(RValue rvalue) const;

207

208

209 LValue projectValue() const {

211 Address addr = getAtomicAddress();

212 if (hasPadding())

214

215 return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),

217 }

218

219

220

222 bool AsValue, llvm::AtomicOrdering AO,

223 bool IsVolatile);

224

225

226

227

228

229

230

231

232

233 std::pair<RValue, llvm::Value *>

235 llvm::AtomicOrdering Success =

236 llvm::AtomicOrdering::SequentiallyConsistent,

237 llvm::AtomicOrdering Failure =

238 llvm::AtomicOrdering::SequentiallyConsistent,

239 bool IsWeak = false);

240

241

242

243

244 void EmitAtomicUpdate(llvm::AtomicOrdering AO,

245 const llvm::function_ref<RValue(RValue)> &UpdateOp,

246 bool IsVolatile);

247

248

249 void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,

250 bool IsVolatile);

251

252

254

255

256 Address CreateTempAlloca() const;

257 private:

258 bool requiresMemSetZero(llvm::Type *type) const;

259

260

261

262 void EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,

263 llvm::AtomicOrdering AO, bool IsVolatile);

264

265 llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile,

266 bool CmpXchg = false);

267

268 llvm::Value *EmitAtomicCompareExchangeLibcall(

269 llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,

270 llvm::AtomicOrdering Success =

271 llvm::AtomicOrdering::SequentiallyConsistent,

272 llvm::AtomicOrdering Failure =

273 llvm::AtomicOrdering::SequentiallyConsistent);

274

275 std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(

276 llvm::Value *ExpectedVal, llvm::Value *DesiredVal,

277 llvm::AtomicOrdering Success =

278 llvm::AtomicOrdering::SequentiallyConsistent,

279 llvm::AtomicOrdering Failure =

280 llvm::AtomicOrdering::SequentiallyConsistent,

281 bool IsWeak = false);

282

283 void

284 EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,

285 const llvm::function_ref<RValue(RValue)> &UpdateOp,

286 bool IsVolatile);

287

288 void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,

289 const llvm::function_ref<RValue(RValue)> &UpdateOp,

290 bool IsVolatile);

291

292 void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,

293 bool IsVolatile);

294

295 void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,

296 bool IsVolatile);

297 };

298}
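// Illustrative sketch (the wrapper function here is hypothetical; the method
// names match the class above) of the usage pattern that the EmitAtomic*
// entry points later in this file follow:
//
//   void emitSomeAtomicAccess(CodeGenFunction &CGF, LValue lvalue) {
//     AtomicInfo atomics(CGF, lvalue);     // classify size/alignment/kind
//     if (atomics.shouldUseLibcall()) {
//       // size/alignment unsupported inline: call the __atomic_* runtime
//     } else {
//       // emit native IR (load/store/atomicrmw/cmpxchg) on the padded width
//     }
//   }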

299

300Address AtomicInfo::CreateTempAlloca() const {

302 (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy

303 : AtomicTy,

304 getAtomicAlignment(),

305 "atomic-temp");

306

309 TempAlloca, getAtomicAddress().getType(),

310 getAtomicAddress().getElementType());

311 return TempAlloca;

312}

313

315 StringRef fnName,

322 fnAttrB.addAttribute(llvm::Attribute::NoUnwind);

323 fnAttrB.addAttribute(llvm::Attribute::WillReturn);

324 llvm::AttributeList fnAttrs = llvm::AttributeList::get(

325 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);

326

327 llvm::FunctionCallee fn =

331}

332

333

335 uint64_t expectedSize) {

336 return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);

337}

338

339

340

341

342bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {

343

344 if (hasPadding()) return true;

345

346

347 switch (getEvaluationKind()) {

348

349

354 AtomicSizeInBits / 2);

355

356

358 return false;

359 }

360 llvm_unreachable("bad evaluation kind");

361}
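// Example of why the zeroing matters (assuming an x86-64 target): an IR store
// of x86_fp80 writes only 10 of the 16 bytes occupied by _Atomic(long double),
// so isFullSizeType() is false and requiresMemSetZero() returns true; without
// a prior memset, garbage padding bytes would defeat the bitwise comparison
// that cmpxchg performs.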

362

363bool AtomicInfo::emitMemSetZeroIfNecessary() const {

367 return false;

368

373 return true;

374}

375

379 uint64_t Size,

380 llvm::AtomicOrdering SuccessOrder,

381 llvm::AtomicOrdering FailureOrder,

382 llvm::SyncScope::ID Scope) {

383

386

388 Ptr, Expected, Desired, SuccessOrder, FailureOrder, Scope);

389 Pair->setVolatile(E->isVolatile());

390 Pair->setWeak(IsWeak);

392

393

394

395 llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);

396 llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

397

398

399

400 llvm::BasicBlock *StoreExpectedBB =

402

403

404

405 llvm::BasicBlock *ContinueBB =

407

408

409

410 CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

411

412 CGF.Builder.SetInsertPoint(StoreExpectedBB);

413

415

416 CGF.Builder.CreateBr(ContinueBB);

417

418 CGF.Builder.SetInsertPoint(ContinueBB);

419

421}

422

423

424

425

429 llvm::Value *FailureOrderVal,

430 uint64_t Size,

431 llvm::AtomicOrdering SuccessOrder,

432 llvm::SyncScope::ID Scope) {

433 llvm::AtomicOrdering FailureOrder;

434 if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {

435 auto FOS = FO->getSExtValue();

436 if (!llvm::isValidAtomicOrderingCABI(FOS))

437 FailureOrder = llvm::AtomicOrdering::Monotonic;

438 else

439 switch ((llvm::AtomicOrderingCABI)FOS) {

440 case llvm::AtomicOrderingCABI::relaxed:

441

442

443 case llvm::AtomicOrderingCABI::release:

444 case llvm::AtomicOrderingCABI::acq_rel:

445 FailureOrder = llvm::AtomicOrdering::Monotonic;

446 break;

447 case llvm::AtomicOrderingCABI::consume:

448 case llvm::AtomicOrderingCABI::acquire:

449 FailureOrder = llvm::AtomicOrdering::Acquire;

450 break;

451 case llvm::AtomicOrderingCABI::seq_cst:

452 FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;

453 break;

454 }

455

456

457

458

459 emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,

460 FailureOrder, Scope);

461 return;

462 }

463

464

469

470

471

472

473 llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

474

475 SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),

476 AcquireBB);

477 SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),

478 AcquireBB);

479 SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),

480 SeqCstBB);

481

482

483 CGF.Builder.SetInsertPoint(MonotonicBB);

485 Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);

486 CGF.Builder.CreateBr(ContBB);

487

488 CGF.Builder.SetInsertPoint(AcquireBB);

489 emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,

490 llvm::AtomicOrdering::Acquire, Scope);

491 CGF.Builder.CreateBr(ContBB);

492

493 CGF.Builder.SetInsertPoint(SeqCstBB);

494 emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,

495 llvm::AtomicOrdering::SequentiallyConsistent, Scope);

496 CGF.Builder.CreateBr(ContBB);

497

498 CGF.Builder.SetInsertPoint(ContBB);

499}
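// Example of the clamping above, for a constant failure order:
//
//   __c11_atomic_compare_exchange_strong(p, &expected, desired,
//       /*success=*/memory_order_acq_rel, /*failure=*/memory_order_release);
//
// `release` is not a legal failure (load) ordering, so FailureOrder is
// clamped to Monotonic; `consume` and `acquire` become Acquire, and only
// `seq_cst` stays SequentiallyConsistent. A failure order that is not a
// compile-time constant takes the switch-based path instead.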

500

501

502

505 bool IsSigned,

506 llvm::Value *OldVal,

507 llvm::Value *RHS) {

508 llvm::CmpInst::Predicate Pred;

509 switch (Op) {

510 default:

511 llvm_unreachable("Unexpected min/max operation");

512 case AtomicExpr::AO__atomic_max_fetch:

513 case AtomicExpr::AO__scoped_atomic_max_fetch:

514 Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;

515 break;

516 case AtomicExpr::AO__atomic_min_fetch:

517 case AtomicExpr::AO__scoped_atomic_min_fetch:

518 Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;

519 break;

520 }

521 llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");

522 return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");

523}
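// E.g. for a signed __atomic_max_fetch on i32, the atomicrmw only yields the
// *old* value, so the helper above recomputes what was actually stored:
//
//   %tst    = icmp sgt i32 %old, %rhs
//   %newval = select i1 %tst, i32 %old, i32 %rhs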

524

527 llvm::Value *IsWeak, llvm::Value *FailureOrder,

528 uint64_t Size, llvm::AtomicOrdering Order,

529 llvm::SyncScope::ID Scope) {

530 llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;

531 bool PostOpMinMax = false;

532 unsigned PostOp = 0;

533

534 switch (E->getOp()) {

535 case AtomicExpr::AO__c11_atomic_init:

536 case AtomicExpr::AO__opencl_atomic_init:

537 llvm_unreachable("Already handled!");

538

539 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:

540 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:

541 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:

543 FailureOrder, Size, Order, Scope);

544 return;

545 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:

546 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:

547 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:

549 FailureOrder, Size, Order, Scope);

550 return;

551 case AtomicExpr::AO__atomic_compare_exchange:

552 case AtomicExpr::AO__atomic_compare_exchange_n:

553 case AtomicExpr::AO__scoped_atomic_compare_exchange:

554 case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {

555 if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {

557 Val1, Val2, FailureOrder, Size, Order, Scope);

558 } else {

559

560 llvm::BasicBlock *StrongBB =

563 llvm::BasicBlock *ContBB =

565

566 llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);

567 SI->addCase(CGF.Builder.getInt1(false), StrongBB);

568

569 CGF.Builder.SetInsertPoint(StrongBB);

571 FailureOrder, Size, Order, Scope);

572 CGF.Builder.CreateBr(ContBB);

573

574 CGF.Builder.SetInsertPoint(WeakBB);

576 FailureOrder, Size, Order, Scope);

577 CGF.Builder.CreateBr(ContBB);

578

579 CGF.Builder.SetInsertPoint(ContBB);

580 }

581 return;

582 }

583 case AtomicExpr::AO__c11_atomic_load:

584 case AtomicExpr::AO__opencl_atomic_load:

585 case AtomicExpr::AO__hip_atomic_load:

586 case AtomicExpr::AO__atomic_load_n:

587 case AtomicExpr::AO__atomic_load:

588 case AtomicExpr::AO__scoped_atomic_load_n:

589 case AtomicExpr::AO__scoped_atomic_load: {

591 Load->setAtomic(Order, Scope);

592 Load->setVolatile(E->isVolatile());

594 return;

595 }

596

597 case AtomicExpr::AO__c11_atomic_store:

598 case AtomicExpr::AO__opencl_atomic_store:

599 case AtomicExpr::AO__hip_atomic_store:

600 case AtomicExpr::AO__atomic_store:

601 case AtomicExpr::AO__atomic_store_n:

602 case AtomicExpr::AO__scoped_atomic_store:

603 case AtomicExpr::AO__scoped_atomic_store_n: {

606 Store->setAtomic(Order, Scope);

607 Store->setVolatile(E->isVolatile());

608 return;

609 }

610

611 case AtomicExpr::AO__c11_atomic_exchange:

612 case AtomicExpr::AO__hip_atomic_exchange:

613 case AtomicExpr::AO__opencl_atomic_exchange:

614 case AtomicExpr::AO__atomic_exchange_n:

615 case AtomicExpr::AO__atomic_exchange:

616 case AtomicExpr::AO__scoped_atomic_exchange_n:

617 case AtomicExpr::AO__scoped_atomic_exchange:

618 Op = llvm::AtomicRMWInst::Xchg;

619 break;

620

621 case AtomicExpr::AO__atomic_add_fetch:

622 case AtomicExpr::AO__scoped_atomic_add_fetch:

623 PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd

624 : llvm::Instruction::Add;

625 [[fallthrough]];

626 case AtomicExpr::AO__c11_atomic_fetch_add:

627 case AtomicExpr::AO__hip_atomic_fetch_add:

628 case AtomicExpr::AO__opencl_atomic_fetch_add:

629 case AtomicExpr::AO__atomic_fetch_add:

630 case AtomicExpr::AO__scoped_atomic_fetch_add:

631 Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd

632 : llvm::AtomicRMWInst::Add;

633 break;

634

635 case AtomicExpr::AO__atomic_sub_fetch:

636 case AtomicExpr::AO__scoped_atomic_sub_fetch:

637 PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub

638 : llvm::Instruction::Sub;

639 [[fallthrough]];

640 case AtomicExpr::AO__c11_atomic_fetch_sub:

641 case AtomicExpr::AO__hip_atomic_fetch_sub:

642 case AtomicExpr::AO__opencl_atomic_fetch_sub:

643 case AtomicExpr::AO__atomic_fetch_sub:

644 case AtomicExpr::AO__scoped_atomic_fetch_sub:

645 Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub

646 : llvm::AtomicRMWInst::Sub;

647 break;

648

649 case AtomicExpr::AO__atomic_min_fetch:

650 case AtomicExpr::AO__scoped_atomic_min_fetch:

651 PostOpMinMax = true;

652 [[fallthrough]];

653 case AtomicExpr::AO__c11_atomic_fetch_min:

654 case AtomicExpr::AO__hip_atomic_fetch_min:

655 case AtomicExpr::AO__opencl_atomic_fetch_min:

656 case AtomicExpr::AO__atomic_fetch_min:

657 case AtomicExpr::AO__scoped_atomic_fetch_min:

658 Op = E->getValueType()->isFloatingType()

659 ? llvm::AtomicRMWInst::FMin

660 : (E->getValueType()->isSignedIntegerType()

661 ? llvm::AtomicRMWInst::Min

662 : llvm::AtomicRMWInst::UMin);

663 break;

664

665 case AtomicExpr::AO__atomic_max_fetch:

666 case AtomicExpr::AO__scoped_atomic_max_fetch:

667 PostOpMinMax = true;

668 [[fallthrough]];

669 case AtomicExpr::AO__c11_atomic_fetch_max:

670 case AtomicExpr::AO__hip_atomic_fetch_max:

671 case AtomicExpr::AO__opencl_atomic_fetch_max:

672 case AtomicExpr::AO__atomic_fetch_max:

673 case AtomicExpr::AO__scoped_atomic_fetch_max:

674 Op = E->getValueType()->isFloatingType()

675 ? llvm::AtomicRMWInst::FMax

676 : (E->getValueType()->isSignedIntegerType()

677 ? llvm::AtomicRMWInst::Max

678 : llvm::AtomicRMWInst::UMax);

679 break;

680

681 case AtomicExpr::AO__atomic_and_fetch:

682 case AtomicExpr::AO__scoped_atomic_and_fetch:

683 PostOp = llvm::Instruction::And;

684 [[fallthrough]];

685 case AtomicExpr::AO__c11_atomic_fetch_and:

686 case AtomicExpr::AO__hip_atomic_fetch_and:

687 case AtomicExpr::AO__opencl_atomic_fetch_and:

688 case AtomicExpr::AO__atomic_fetch_and:

689 case AtomicExpr::AO__scoped_atomic_fetch_and:

690 Op = llvm::AtomicRMWInst::And;

691 break;

692

693 case AtomicExpr::AO__atomic_or_fetch:

694 case AtomicExpr::AO__scoped_atomic_or_fetch:

695 PostOp = llvm::Instruction::Or;

696 [[fallthrough]];

697 case AtomicExpr::AO__c11_atomic_fetch_or:

698 case AtomicExpr::AO__hip_atomic_fetch_or:

699 case AtomicExpr::AO__opencl_atomic_fetch_or:

700 case AtomicExpr::AO__atomic_fetch_or:

701 case AtomicExpr::AO__scoped_atomic_fetch_or:

702 Op = llvm::AtomicRMWInst::Or;

703 break;

704

705 case AtomicExpr::AO__atomic_xor_fetch:

706 case AtomicExpr::AO__scoped_atomic_xor_fetch:

707 PostOp = llvm::Instruction::Xor;

708 [[fallthrough]];

709 case AtomicExpr::AO__c11_atomic_fetch_xor:

710 case AtomicExpr::AO__hip_atomic_fetch_xor:

711 case AtomicExpr::AO__opencl_atomic_fetch_xor:

712 case AtomicExpr::AO__atomic_fetch_xor:

713 case AtomicExpr::AO__scoped_atomic_fetch_xor:

714 Op = llvm::AtomicRMWInst::Xor;

715 break;

716

717 case AtomicExpr::AO__atomic_nand_fetch:

718 case AtomicExpr::AO__scoped_atomic_nand_fetch:

719 PostOp = llvm::Instruction::And;

720 [[fallthrough]];

721 case AtomicExpr::AO__c11_atomic_fetch_nand:

722 case AtomicExpr::AO__atomic_fetch_nand:

723 case AtomicExpr::AO__scoped_atomic_fetch_nand:

724 Op = llvm::AtomicRMWInst::Nand;

725 break;

726 }

727

729 llvm::AtomicRMWInst *RMWI =

731 RMWI->setVolatile(E->isVolatile());

732

733

734

735 llvm::Value *Result = RMWI;

736 if (PostOpMinMax)

738 E->getValueType()->isSignedIntegerType(),

739 RMWI, LoadVal1);

740 else if (PostOp)

741 Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,

742 LoadVal1);

743 if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch ||

744 E->getOp() == AtomicExpr::AO__scoped_atomic_nand_fetch)

745 Result = CGF.Builder.CreateNot(Result);

747}
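// Note the nand handling above: `atomicrmw nand` stores ~(old & val) but
// returns the old value, so __atomic_nand_fetch re-derives the stored result
// (sketch, for an i32 operand):
//
//   %old = atomicrmw nand ptr %p, i32 %v seq_cst  ; memory now ~(%old & %v)
//   %and = and i32 %old, %v                       ; PostOp = And
//   %res = xor i32 %and, -1                       ; CreateNot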

748

749

750

755 true);

756 return DeclPtr;

757}

758

761 llvm::Value *IsWeak, llvm::Value *FailureOrder,

762 uint64_t Size, llvm::AtomicOrdering Order,

763 llvm::Value *Scope) {

764 auto ScopeModel = Expr->getScopeModel();

765

766

767

768 if (!ScopeModel) {

769 llvm::SyncScope::ID SS;

771

772

773

774

776 SyncScope::OpenCLDevice,

778 else

779 SS = llvm::SyncScope::System;

780 EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,

781 Order, SS);

782 return;

783 }

784

785

786 if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {

788 CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),

790 EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,

791 Order, SCID);

792 return;

793 }

794

795

796 auto &Builder = CGF.Builder;

797 auto Scopes = ScopeModel->getRuntimeValues();

798 llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;

799 for (auto S : Scopes)

801

802 llvm::BasicBlock *ContBB =

804

805 auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);

806

807

808 auto FallBack = ScopeModel->getFallBackValue();

809 llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);

810 for (auto S : Scopes) {

811 auto *B = BB[S];

812 if (S != FallBack)

813 SI->addCase(Builder.getInt32(S), B);

814

815 Builder.SetInsertPoint(B);

816 EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,

817 Order,

819 ScopeModel->map(S),

820 Order,

822 Builder.CreateBr(ContBB);

823 }

824

825 Builder.SetInsertPoint(ContBB);

826}
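// When the scope is only known at run time, the loop above creates one basic
// block per possible scope and switches on the value, roughly:
//
//   switch i32 %scope, label %fallback.scope [ ... one case per scope ... ]
//
// with every block emitting the same atomic operation under a different
// llvm::SyncScope::ID (the model's fallback scope is the switch default).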

827

832 MemTy = AT->getValueType();

833 llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;

834

839

840 if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||

841 E->getOp() == AtomicExpr::AO__opencl_atomic_init) {

845 }

846

850

854 bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;

855 bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;

856 if (Misaligned) {

858 << (int)TInfo.Width.getQuantity()

860 }

861 if (Oversized) {

863 << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();

864 }

865

867 llvm::Value *Scope =

868 E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;

869 bool ShouldCastToIntPtrTy = true;

870

871 switch (E->getOp()) {

872 case AtomicExpr::AO__c11_atomic_init:

873 case AtomicExpr::AO__opencl_atomic_init:

874 llvm_unreachable("Already handled above with EmitAtomicInit!");

875

876 case AtomicExpr::AO__atomic_load_n:

877 case AtomicExpr::AO__scoped_atomic_load_n:

878 case AtomicExpr::AO__c11_atomic_load:

879 case AtomicExpr::AO__opencl_atomic_load:

880 case AtomicExpr::AO__hip_atomic_load:

881 break;

882

883 case AtomicExpr::AO__atomic_load:

884 case AtomicExpr::AO__scoped_atomic_load:

886 break;

887

888 case AtomicExpr::AO__atomic_store:

889 case AtomicExpr::AO__scoped_atomic_store:

891 break;

892

893 case AtomicExpr::AO__atomic_exchange:

894 case AtomicExpr::AO__scoped_atomic_exchange:

897 break;

898

899 case AtomicExpr::AO__atomic_compare_exchange:

900 case AtomicExpr::AO__atomic_compare_exchange_n:

901 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:

902 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:

903 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:

904 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:

905 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:

906 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:

907 case AtomicExpr::AO__scoped_atomic_compare_exchange:

908 case AtomicExpr::AO__scoped_atomic_compare_exchange_n:

910 if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||

911 E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)

913 else

916 if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||

917 E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||

918 E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||

919 E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)

921 break;

922

923 case AtomicExpr::AO__c11_atomic_fetch_add:

924 case AtomicExpr::AO__c11_atomic_fetch_sub:

925 case AtomicExpr::AO__hip_atomic_fetch_add:

926 case AtomicExpr::AO__hip_atomic_fetch_sub:

927 case AtomicExpr::AO__opencl_atomic_fetch_add:

928 case AtomicExpr::AO__opencl_atomic_fetch_sub:

930

931

932

933

938 Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));

940 Val1 = Temp;

942 break;

943 }

944 [[fallthrough]];

945 case AtomicExpr::AO__atomic_fetch_add:

946 case AtomicExpr::AO__atomic_fetch_max:

947 case AtomicExpr::AO__atomic_fetch_min:

948 case AtomicExpr::AO__atomic_fetch_sub:

949 case AtomicExpr::AO__atomic_add_fetch:

950 case AtomicExpr::AO__atomic_max_fetch:

951 case AtomicExpr::AO__atomic_min_fetch:

952 case AtomicExpr::AO__atomic_sub_fetch:

953 case AtomicExpr::AO__c11_atomic_fetch_max:

954 case AtomicExpr::AO__c11_atomic_fetch_min:

955 case AtomicExpr::AO__opencl_atomic_fetch_max:

956 case AtomicExpr::AO__opencl_atomic_fetch_min:

957 case AtomicExpr::AO__hip_atomic_fetch_max:

958 case AtomicExpr::AO__hip_atomic_fetch_min:

959 case AtomicExpr::AO__scoped_atomic_fetch_add:

960 case AtomicExpr::AO__scoped_atomic_fetch_max:

961 case AtomicExpr::AO__scoped_atomic_fetch_min:

962 case AtomicExpr::AO__scoped_atomic_fetch_sub:

963 case AtomicExpr::AO__scoped_atomic_add_fetch:

964 case AtomicExpr::AO__scoped_atomic_max_fetch:

965 case AtomicExpr::AO__scoped_atomic_min_fetch:

966 case AtomicExpr::AO__scoped_atomic_sub_fetch:

968 [[fallthrough]];

969

970 case AtomicExpr::AO__atomic_fetch_and:

971 case AtomicExpr::AO__atomic_fetch_nand:

972 case AtomicExpr::AO__atomic_fetch_or:

973 case AtomicExpr::AO__atomic_fetch_xor:

974 case AtomicExpr::AO__atomic_and_fetch:

975 case AtomicExpr::AO__atomic_nand_fetch:

976 case AtomicExpr::AO__atomic_or_fetch:

977 case AtomicExpr::AO__atomic_xor_fetch:

978 case AtomicExpr::AO__atomic_store_n:

979 case AtomicExpr::AO__atomic_exchange_n:

980 case AtomicExpr::AO__c11_atomic_fetch_and:

981 case AtomicExpr::AO__c11_atomic_fetch_nand:

982 case AtomicExpr::AO__c11_atomic_fetch_or:

983 case AtomicExpr::AO__c11_atomic_fetch_xor:

984 case AtomicExpr::AO__c11_atomic_store:

985 case AtomicExpr::AO__c11_atomic_exchange:

986 case AtomicExpr::AO__hip_atomic_fetch_and:

987 case AtomicExpr::AO__hip_atomic_fetch_or:

988 case AtomicExpr::AO__hip_atomic_fetch_xor:

989 case AtomicExpr::AO__hip_atomic_store:

990 case AtomicExpr::AO__hip_atomic_exchange:

991 case AtomicExpr::AO__opencl_atomic_fetch_and:

992 case AtomicExpr::AO__opencl_atomic_fetch_or:

993 case AtomicExpr::AO__opencl_atomic_fetch_xor:

994 case AtomicExpr::AO__opencl_atomic_store:

995 case AtomicExpr::AO__opencl_atomic_exchange:

996 case AtomicExpr::AO__scoped_atomic_fetch_and:

997 case AtomicExpr::AO__scoped_atomic_fetch_nand:

998 case AtomicExpr::AO__scoped_atomic_fetch_or:

999 case AtomicExpr::AO__scoped_atomic_fetch_xor:

1000 case AtomicExpr::AO__scoped_atomic_and_fetch:

1001 case AtomicExpr::AO__scoped_atomic_nand_fetch:

1002 case AtomicExpr::AO__scoped_atomic_or_fetch:

1003 case AtomicExpr::AO__scoped_atomic_xor_fetch:

1004 case AtomicExpr::AO__scoped_atomic_store_n:

1005 case AtomicExpr::AO__scoped_atomic_exchange_n:

1007 break;

1008 }

1009

1011

1012

1013

1014

1016 AtomicInfo Atomics(*this, AtomicVal);

1017

1018 if (ShouldCastToIntPtrTy) {

1019 Ptr = Atomics.castToAtomicIntPointer(Ptr);

1021 Val1 = Atomics.convertToAtomicIntPointer(Val1);

1023 Val2 = Atomics.convertToAtomicIntPointer(Val2);

1024 }

1026 if (ShouldCastToIntPtrTy)

1027 Dest = Atomics.castToAtomicIntPointer(Dest);

1028 } else if (E->isCmpXChg())

1031 Dest = Atomics.CreateTempAlloca();

1032 if (ShouldCastToIntPtrTy)

1033 Dest = Atomics.castToAtomicIntPointer(Dest);

1034 }

1035

1036 bool PowerOf2Size = (Size & (Size - 1)) == 0;

1037 bool UseLibcall = !PowerOf2Size || (Size > 16);
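// Examples of the decision just above (the backend may still expand an
// "inline" case to a runtime call on targets without native support):
//   Size = 8   power of 2, <= 16 bytes -> inline IR atomic
//   Size = 12  not a power of 2        -> frontend __atomic_* libcall
//   Size = 32  larger than 16 bytes    -> frontend __atomic_* libcall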

1038

1039

1040

1041

1042

1043

1044

1045

1046

1047

1048 if (UseLibcall) {

1050

1053

1054

1055

1056

1057 auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {

1058 if (E->isOpenCL())

1059 return V;

1062 return V;

1064 auto *DestType = llvm::PointerType::get(getLLVMContext(), DestAS);

1065

1068 };

1069

1073

1074

1075 std::string LibCallName;

1077 bool HaveRetTy = false;

1078 switch (E->getOp()) {

1079 case AtomicExpr::AO__c11_atomic_init:

1080 case AtomicExpr::AO__opencl_atomic_init:

1081 llvm_unreachable("Already handled!");

1082

1083

1084

1085

1086

1087

1088 case AtomicExpr::AO__atomic_compare_exchange:

1089 case AtomicExpr::AO__atomic_compare_exchange_n:

1090 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:

1091 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:

1092 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:

1093 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:

1094 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:

1095 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:

1096 case AtomicExpr::AO__scoped_atomic_compare_exchange:

1097 case AtomicExpr::AO__scoped_atomic_compare_exchange_n:

1098 LibCallName = "__atomic_compare_exchange";

1100 HaveRetTy = true;

1108 Order = OrderFail;

1109 break;

1110

1111

1112 case AtomicExpr::AO__atomic_exchange:

1113 case AtomicExpr::AO__atomic_exchange_n:

1114 case AtomicExpr::AO__c11_atomic_exchange:

1115 case AtomicExpr::AO__hip_atomic_exchange:

1116 case AtomicExpr::AO__opencl_atomic_exchange:

1117 case AtomicExpr::AO__scoped_atomic_exchange:

1118 case AtomicExpr::AO__scoped_atomic_exchange_n:

1119 LibCallName = "__atomic_exchange";

1123 break;

1124

1125 case AtomicExpr::AO__atomic_store:

1126 case AtomicExpr::AO__atomic_store_n:

1127 case AtomicExpr::AO__c11_atomic_store:

1128 case AtomicExpr::AO__hip_atomic_store:

1129 case AtomicExpr::AO__opencl_atomic_store:

1130 case AtomicExpr::AO__scoped_atomic_store:

1131 case AtomicExpr::AO__scoped_atomic_store_n:

1132 LibCallName = "__atomic_store";

1134 HaveRetTy = true;

1138 break;

1139

1140 case AtomicExpr::AO__atomic_load:

1141 case AtomicExpr::AO__atomic_load_n:

1142 case AtomicExpr::AO__c11_atomic_load:

1143 case AtomicExpr::AO__hip_atomic_load:

1144 case AtomicExpr::AO__opencl_atomic_load:

1145 case AtomicExpr::AO__scoped_atomic_load:

1146 case AtomicExpr::AO__scoped_atomic_load_n:

1147 LibCallName = "__atomic_load";

1148 break;

1149 case AtomicExpr::AO__atomic_add_fetch:

1150 case AtomicExpr::AO__scoped_atomic_add_fetch:

1151 case AtomicExpr::AO__atomic_fetch_add:

1152 case AtomicExpr::AO__c11_atomic_fetch_add:

1153 case AtomicExpr::AO__hip_atomic_fetch_add:

1154 case AtomicExpr::AO__opencl_atomic_fetch_add:

1155 case AtomicExpr::AO__scoped_atomic_fetch_add:

1156 case AtomicExpr::AO__atomic_and_fetch:

1157 case AtomicExpr::AO__scoped_atomic_and_fetch:

1158 case AtomicExpr::AO__atomic_fetch_and:

1159 case AtomicExpr::AO__c11_atomic_fetch_and:

1160 case AtomicExpr::AO__hip_atomic_fetch_and:

1161 case AtomicExpr::AO__opencl_atomic_fetch_and:

1162 case AtomicExpr::AO__scoped_atomic_fetch_and:

1163 case AtomicExpr::AO__atomic_or_fetch:

1164 case AtomicExpr::AO__scoped_atomic_or_fetch:

1165 case AtomicExpr::AO__atomic_fetch_or:

1166 case AtomicExpr::AO__c11_atomic_fetch_or:

1167 case AtomicExpr::AO__hip_atomic_fetch_or:

1168 case AtomicExpr::AO__opencl_atomic_fetch_or:

1169 case AtomicExpr::AO__scoped_atomic_fetch_or:

1170 case AtomicExpr::AO__atomic_sub_fetch:

1171 case AtomicExpr::AO__scoped_atomic_sub_fetch:

1172 case AtomicExpr::AO__atomic_fetch_sub:

1173 case AtomicExpr::AO__c11_atomic_fetch_sub:

1174 case AtomicExpr::AO__hip_atomic_fetch_sub:

1175 case AtomicExpr::AO__opencl_atomic_fetch_sub:

1176 case AtomicExpr::AO__scoped_atomic_fetch_sub:

1177 case AtomicExpr::AO__atomic_xor_fetch:

1178 case AtomicExpr::AO__scoped_atomic_xor_fetch:

1179 case AtomicExpr::AO__atomic_fetch_xor:

1180 case AtomicExpr::AO__c11_atomic_fetch_xor:

1181 case AtomicExpr::AO__hip_atomic_fetch_xor:

1182 case AtomicExpr::AO__opencl_atomic_fetch_xor:

1183 case AtomicExpr::AO__scoped_atomic_fetch_xor:

1184 case AtomicExpr::AO__atomic_nand_fetch:

1185 case AtomicExpr::AO__atomic_fetch_nand:

1186 case AtomicExpr::AO__c11_atomic_fetch_nand:

1187 case AtomicExpr::AO__scoped_atomic_fetch_nand:

1188 case AtomicExpr::AO__scoped_atomic_nand_fetch:

1189 case AtomicExpr::AO__atomic_min_fetch:

1190 case AtomicExpr::AO__atomic_fetch_min:

1191 case AtomicExpr::AO__c11_atomic_fetch_min:

1192 case AtomicExpr::AO__hip_atomic_fetch_min:

1193 case AtomicExpr::AO__opencl_atomic_fetch_min:

1194 case AtomicExpr::AO__scoped_atomic_fetch_min:

1195 case AtomicExpr::AO__scoped_atomic_min_fetch:

1196 case AtomicExpr::AO__atomic_max_fetch:

1197 case AtomicExpr::AO__atomic_fetch_max:

1198 case AtomicExpr::AO__c11_atomic_fetch_max:

1199 case AtomicExpr::AO__hip_atomic_fetch_max:

1200 case AtomicExpr::AO__opencl_atomic_fetch_max:

1201 case AtomicExpr::AO__scoped_atomic_fetch_max:

1202 case AtomicExpr::AO__scoped_atomic_max_fetch:

1203 llvm_unreachable("Integral atomic operations always become atomicrmw!");

1204 }

1205

1206 if (E->isOpenCL()) {

1207 LibCallName =

1208 std::string("__opencl") + StringRef(LibCallName).drop_front(1).str();

1209 }

1210

1211 if (!HaveRetTy) {

1212

1215 CastToGenericAddrSpace(Dest.emitRawPointer(*this), RetTy)),

1217 }

1218

1221 if (E->isOpenCL())

1223

1225

1226 if (E->isCmpXChg())

1227 return Res;

1228

1231

1234 }

1235

1236 bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||

1237 E->getOp() == AtomicExpr::AO__opencl_atomic_store ||

1238 E->getOp() == AtomicExpr::AO__hip_atomic_store ||

1239 E->getOp() == AtomicExpr::AO__atomic_store ||

1240 E->getOp() == AtomicExpr::AO__atomic_store_n ||

1241 E->getOp() == AtomicExpr::AO__scoped_atomic_store ||

1242 E->getOp() == AtomicExpr::AO__scoped_atomic_store_n;

1243 bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||

1244 E->getOp() == AtomicExpr::AO__opencl_atomic_load ||

1245 E->getOp() == AtomicExpr::AO__hip_atomic_load ||

1246 E->getOp() == AtomicExpr::AO__atomic_load ||

1247 E->getOp() == AtomicExpr::AO__atomic_load_n ||

1248 E->getOp() == AtomicExpr::AO__scoped_atomic_load ||

1249 E->getOp() == AtomicExpr::AO__scoped_atomic_load_n;

1250

1251 if (isa<llvm::ConstantInt>(Order)) {

1252 auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();

1253

1254

1255 if (llvm::isValidAtomicOrderingCABI(ord))

1256 switch ((llvm::AtomicOrderingCABI)ord) {

1257 case llvm::AtomicOrderingCABI::relaxed:

1258 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,

1259 llvm::AtomicOrdering::Monotonic, Scope);

1260 break;

1261 case llvm::AtomicOrderingCABI::consume:

1262 case llvm::AtomicOrderingCABI::acquire:

1263 if (IsStore)

1264 break;

1265 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,

1266 llvm::AtomicOrdering::Acquire, Scope);

1267 break;

1268 case llvm::AtomicOrderingCABI::release:

1269 if (IsLoad)

1270 break;

1271 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,

1272 llvm::AtomicOrdering::Release, Scope);

1273 break;

1274 case llvm::AtomicOrderingCABI::acq_rel:

1275 if (IsLoad || IsStore)

1276 break;

1277 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,

1278 llvm::AtomicOrdering::AcquireRelease, Scope);

1279 break;

1280 case llvm::AtomicOrderingCABI::seq_cst:

1281 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,

1282 llvm::AtomicOrdering::SequentiallyConsistent, Scope);

1283 break;

1284 }

1287

1290 }

1291

1292

1293

1294

1295 llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,

1296 *ReleaseBB = nullptr, *AcqRelBB = nullptr,

1297 *SeqCstBB = nullptr;

1299 if (!IsStore)

1301 if (!IsLoad)

1303 if (!IsLoad && !IsStore)

1307

1308

1309

1310

1311

1312 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);

1313 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

1314

1315

1316 Builder.SetInsertPoint(MonotonicBB);

1317 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,

1318 llvm::AtomicOrdering::Monotonic, Scope);

1319 Builder.CreateBr(ContBB);

1320 if (!IsStore) {

1321 Builder.SetInsertPoint(AcquireBB);

1322 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,

1323 llvm::AtomicOrdering::Acquire, Scope);

1324 Builder.CreateBr(ContBB);

1325 SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),

1326 AcquireBB);

1327 SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),

1328 AcquireBB);

1329 }

1330 if (!IsLoad) {

1331 Builder.SetInsertPoint(ReleaseBB);

1332 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,

1333 llvm::AtomicOrdering::Release, Scope);

1334 Builder.CreateBr(ContBB);

1335 SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),

1336 ReleaseBB);

1337 }

1338 if (!IsLoad && !IsStore) {

1339 Builder.SetInsertPoint(AcqRelBB);

1340 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,

1341 llvm::AtomicOrdering::AcquireRelease, Scope);

1342 Builder.CreateBr(ContBB);

1343 SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),

1344 AcqRelBB);

1345 }

1346 Builder.SetInsertPoint(SeqCstBB);

1347 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,

1348 llvm::AtomicOrdering::SequentiallyConsistent, Scope);

1349 Builder.CreateBr(ContBB);

1350 SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),

1351 SeqCstBB);

1352

1353

1354 Builder.SetInsertPoint(ContBB);

1357

1358 assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());

1361}

1362

1363Address AtomicInfo::castToAtomicIntPointer(Address addr) const {

1364 llvm::IntegerType *ty =

1365 llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
1366 return addr.withElementType(ty);

1367}

1368

1369Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {

1372 if (SourceSizeInBits != AtomicSizeInBits) {

1373 Address Tmp = CreateTempAlloca();

1375 std::min(AtomicSizeInBits, SourceSizeInBits) / 8);

1376 Addr = Tmp;

1377 }

1378

1379 return castToAtomicIntPointer(Addr);

1380}

1381

1382RValue AtomicInfo::convertAtomicTempToRValue(Address addr,

1385 bool asValue) const {

1388 return resultSlot.asRValue();

1389

1390

1391 if (hasPadding())

1393

1394

1395

1397 }

1398 if (!asValue)

1399

1413}

1414

1415

1416

1417

1418

1419

1421 if (ValTy->isFloatingPointTy())

1422 return ValTy->isX86_FP80Ty() || CmpXchg;

1423 return !ValTy->isIntegerTy() && !ValTy->isPointerTy();

1424}

1425

1426RValue AtomicInfo::ConvertToValueOrAtomic(llvm::Value *Val,

1429 bool CmpXchg) const {

1430

1431 assert((Val->getType()->isIntegerTy() || Val->getType()->isPointerTy() ||

1432 Val->getType()->isIEEELikeFPTy()) &&

1433 "Expected integer, pointer or floating point value when converting "

1434 "result.");

1435 if (getEvaluationKind() == TEK_Scalar &&

1438 !hasPadding()) ||

1439 !AsValue)) {

1440 auto *ValTy = AsValue

1442 : getAtomicAddress().getElementType();

1444 assert((!ValTy->isIntegerTy() || Val->getType() == ValTy) &&

1445 "Different integer types.");

1447 }

1448 if (llvm::CastInst::isBitCastable(Val->getType(), ValTy))

1450 }

1451

1452

1453

1455 bool TempIsVolatile = false;

1456 if (AsValue && getEvaluationKind() == TEK_Aggregate) {

1457 assert(!ResultSlot.isIgnored());

1459 TempIsVolatile = ResultSlot.isVolatile();

1460 } else {

1461 Temp = CreateTempAlloca();

1462 }

1463

1464

1465 Address CastTemp = castToAtomicIntPointer(Temp);

1466 CGF.Builder.CreateStore(Val, CastTemp)->setVolatile(TempIsVolatile);

1467

1468 return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);

1469}

1470

1471 void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,

1472 llvm::AtomicOrdering AO, bool) {

1473

1478 Args.add(

1479 RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),

1482}

1483

1484llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,

1485 bool IsVolatile, bool CmpXchg) {

1486

1487 Address Addr = getAtomicAddress();

1489 Addr = castToAtomicIntPointer(Addr);

1491 Load->setAtomic(AO);

1492

1493

1494 if (IsVolatile)

1495 Load->setVolatile(true);

1497 return Load;

1498}

1499

1500

1501

1502

1505 AtomicInfo AI(*this, LV);

1507

1508 bool AtomicIsInline = !AI.shouldUseLibcall();

1509

1512 return false;

1513 return IsVolatile && AtomicIsInline;

1514}

1515

1518 llvm::AtomicOrdering AO;

1521 AO = llvm::AtomicOrdering::SequentiallyConsistent;

1522 } else {

1523 AO = llvm::AtomicOrdering::Acquire;

1524 IsVolatile = true;

1525 }

1526 return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);

1527}

1528

1530 bool AsValue, llvm::AtomicOrdering AO,

1531 bool IsVolatile) {

1532

1533 if (shouldUseLibcall()) {

1538 } else

1539 TempAddr = CreateTempAlloca();

1540

1541 EmitAtomicLoadLibcall(TempAddr.emitRawPointer(CGF), AO, IsVolatile);

1542

1543

1544

1545 return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);

1546 }

1547

1548

1549 auto *Load = EmitAtomicLoadOp(AO, IsVolatile);

1550

1551

1554

1555

1556

1557 return ConvertToValueOrAtomic(Load, ResultSlot, Loc, AsValue);

1558}

1559

1560

1561

1563 llvm::AtomicOrdering AO, bool IsVolatile,

1565 AtomicInfo Atomics(*this, src);

1566 return Atomics.EmitAtomicLoad(resultSlot, loc, true, AO,

1567 IsVolatile);

1568}

1569

1570

1571

1572void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {

1574

1575

1576

1580 getAtomicType());

1585 return;

1586 }

1587

1588

1589

1590

1591 emitMemSetZeroIfNecessary();

1592

1593

1594 LValue TempLVal = projectValue();

1595

1596

1599 } else {

1601 }

1602}

1603

1604

1605

1606

1607Address AtomicInfo::materializeRValue(RValue rvalue) const {

1608

1609

1612

1613

1615 AtomicInfo Atomics(CGF, TempLV);

1616 Atomics.emitCopyIntoMemory(rvalue);

1618}

1619

1620llvm::Value *AtomicInfo::getScalarRValValueOrNull(RValue RVal) const {

1623 return nullptr;

1624}

1625

1626llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal, bool CmpXchg) const {

1627

1628

1629 if (llvm::Value *Value = getScalarRValValueOrNull(RVal)) {

1632 else {

1633 llvm::IntegerType *InputIntTy = llvm::IntegerType::get(

1635 LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());

1636 if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))

1637 return CGF.Builder.CreateBitCast(Value, InputIntTy);

1638 }

1639 }

1640

1641

1642 Address Addr = materializeRValue(RVal);

1643

1644

1645 Addr = castToAtomicIntPointer(Addr);

1647}

1648

1649std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(

1650 llvm::Value *ExpectedVal, llvm::Value *DesiredVal,

1651 llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {

1652

1653 Address Addr = getAtomicAddressAsAtomicIntPointer();

1656

1658 Inst->setWeak(IsWeak);

1659

1660

1661 auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, 0);

1662 auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, 1);

1663 return std::make_pair(PreviousVal, SuccessFailureVal);

1664}

1665

1666llvm::Value *

1667AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,

1668 llvm::Value *DesiredAddr,

1669 llvm::AtomicOrdering Success,

1670 llvm::AtomicOrdering Failure) {

1671

1672

1679 llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),

1682 llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),

1684 auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",

1686

1687 return SuccessFailureRVal.getScalarVal();

1688}

1689

1690std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(

1692 llvm::AtomicOrdering Failure, bool IsWeak) {

1693

1694 if (shouldUseLibcall()) {

1695

1697 llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);

1698 llvm::Value *DesiredPtr = materializeRValue(Desired).emitRawPointer(CGF);

1699 auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr,

1701 return std::make_pair(

1704 Res);

1705 }

1706

1707

1708

1709 auto *ExpectedVal = convertRValueToInt(Expected, true);

1710 auto *DesiredVal = convertRValueToInt(Desired, true);

1711 auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,

1712 Failure, IsWeak);

1713 return std::make_pair(

1716 true),

1718}

1719

1720static void

1722 const llvm::function_ref<RValue(RValue)> &UpdateOp,

1725 LValue AtomicLVal = Atomics.getAtomicLValue();

1727 if (AtomicLVal.isSimple()) {

1728 UpRVal = OldRVal;

1730 } else {

1731

1732 Address Ptr = Atomics.materializeRValue(OldRVal);

1735 UpdateLVal =

1740 DesiredLVal =

1752 } else {

1761 }

1763 }

1764

1765 RValue NewRVal = UpdateOp(UpRVal);

1768 } else {

1771 false);

1772 }

1773}

1774

1775void AtomicInfo::EmitAtomicUpdateLibcall(

1776 llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,

1777 bool IsVolatile) {

1778 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

1779

1780 Address ExpectedAddr = CreateTempAlloca();

1781

1782 EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);

1786 Address DesiredAddr = CreateTempAlloca();

1787 if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||

1788 requiresMemSetZero(getAtomicAddress().getElementType())) {

1791 }

1792 auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,

1796 llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);

1797 llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);

1798 auto *Res =

1799 EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);

1800 CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);

1801 CGF.EmitBlock(ExitBB, true);

1802}

1803

1804void AtomicInfo::EmitAtomicUpdateOp(

1805 llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,

1806 bool IsVolatile) {

1807 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

1808

1809

1810 auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, true);

1811

1814 auto *CurBB = CGF.Builder.GetInsertBlock();

1816 llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),

1817 2);

1818 PHI->addIncoming(OldVal, CurBB);

1819 Address NewAtomicAddr = CreateTempAlloca();

1820 Address NewAtomicIntAddr =

1822 ? castToAtomicIntPointer(NewAtomicAddr)

1823 : NewAtomicAddr;

1824

1825 if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||

1826 requiresMemSetZero(getAtomicAddress().getElementType())) {

1828 }

1831 true);

1834

1835 auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);

1836 PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());

1837 CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);

1838 CGF.EmitBlock(ExitBB, true);

1839}
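// The function above emits the classic compare-exchange retry loop; in IR it
// comes out roughly as (sketch; the load uses the strongest failure ordering
// derived from AO, and the block names follow the code above):
//
//   entry:
//     %old = load atomic i32, ptr %addr <failure AO>
//     br label %atomic_cont
//   atomic_cont:
//     %cur  = phi i32 [ %old, %entry ], [ %prev, %atomic_cont ]
//     ; ...UpdateOp(%cur) materialized into a temp, reloaded as %desired...
//     %pair = cmpxchg ptr %addr, i32 %cur, i32 %desired <AO> <failure AO>
//     %prev = extractvalue { i32, i1 } %pair, 0
//     %ok   = extractvalue { i32, i1 } %pair, 1
//     br i1 %ok, label %atomic_exit, label %atomic_cont
//   atomic_exit: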

1840

1843 LValue AtomicLVal = Atomics.getAtomicLValue();

1845

1847 DesiredLVal =

1852 DesiredLVal =

1856 } else {

1861 }

1862

1863 assert(UpdateRVal.isScalar());

1865}

1866

1867void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,

1868 RValue UpdateRVal, bool IsVolatile) {

1869 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

1870

1871 Address ExpectedAddr = CreateTempAlloca();

1872

1873 EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);

1877 Address DesiredAddr = CreateTempAlloca();

1878 if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||

1879 requiresMemSetZero(getAtomicAddress().getElementType())) {

1882 }

1884 llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);

1885 llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);

1886 auto *Res =

1887 EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);

1888 CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);

1889 CGF.EmitBlock(ExitBB, true);

1890}

1891

1892void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,

1893 bool IsVolatile) {

1894 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

1895

1896

1897 auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, true);

1898

1901 auto *CurBB = CGF.Builder.GetInsertBlock();

1903 llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),

1904 2);

1905 PHI->addIncoming(OldVal, CurBB);

1906 Address NewAtomicAddr = CreateTempAlloca();

1907 Address NewAtomicIntAddr = castToAtomicIntPointer(NewAtomicAddr);

1908 if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||

1909 requiresMemSetZero(getAtomicAddress().getElementType())) {

1911 }

1914

1915 auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);

1916 PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());

1917 CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);

1918 CGF.EmitBlock(ExitBB, true);

1919}

1920

1921void AtomicInfo::EmitAtomicUpdate(

1922 llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,

1923 bool IsVolatile) {

1924 if (shouldUseLibcall()) {

1925 EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);

1926 } else {

1927 EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);

1928 }

1929}

1930

1931void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,

1932 bool IsVolatile) {

1933 if (shouldUseLibcall()) {

1934 EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);

1935 } else {

1936 EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);

1937 }

1938}

1939

1941 bool isInit) {

1943 llvm::AtomicOrdering AO;

1945 AO = llvm::AtomicOrdering::SequentiallyConsistent;

1946 } else {

1947 AO = llvm::AtomicOrdering::Release;

1948 IsVolatile = true;

1949 }

1950 return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);

1951}

1952

1953

1954

1955

1956

1957

1959 llvm::AtomicOrdering AO, bool IsVolatile,

1960 bool isInit) {

1961

1962

1966

1967 AtomicInfo atomics(*this, dest);

1968 LValue LVal = atomics.getAtomicLValue();

1969

1970

1972 if (isInit) {

1973 atomics.emitCopyIntoMemory(rvalue);

1974 return;

1975 }

1976

1977

1978 if (atomics.shouldUseLibcall()) {

1979

1980 Address srcAddr = atomics.materializeRValue(rvalue);

1981

1982

1989 args.add(

1990 RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),

1993 return;

1994 }

1995

1996

1997 llvm::Value *ValToStore = atomics.convertRValueToInt(rvalue);

1998

1999

2000 Address Addr = atomics.getAtomicAddress();

2001 if (llvm::Value *Value = atomics.getScalarRValValueOrNull(rvalue))

2003 Addr = atomics.castToAtomicIntPointer(Addr);

2005 false);

2006 }

2008

2009 if (AO == llvm::AtomicOrdering::Acquire)

2010 AO = llvm::AtomicOrdering::Monotonic;

2011 else if (AO == llvm::AtomicOrdering::AcquireRelease)

2012 AO = llvm::AtomicOrdering::Release;

2013

2014 if (!isInit)

2015 store->setAtomic(AO);

2016

2017

2018 if (IsVolatile)

2019 store->setVolatile(true);

2021 return;

2022 }

2023

2024

2025 atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);

2026}
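// Note the ordering fix-up above: an atomic *store* cannot be Acquire or
// AcquireRelease, so e.g.
//   __c11_atomic_store(p, v, memory_order_acquire);
// ends up as a Monotonic store, and memory_order_acq_rel is downgraded to
// Release before the `store atomic` is emitted.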

2027

2028

2029

2032 llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,

2034

2035

2036 assert(Expected.isAggregate() ||

2037 Expected.getAggregateAddress().getElementType() ==

2042 AtomicInfo Atomics(*this, Obj);

2043

2044 return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,

2045 IsWeak);

2046}

2047

2048llvm::AtomicRMWInst *

2050 llvm::Value *Val, llvm::AtomicOrdering Order,

2051 llvm::SyncScope::ID SSID,

2053 llvm::AtomicRMWInst *RMW =

2056 return RMW;

2057}

2058

2060 LValue LVal, llvm::AtomicOrdering AO,

2061 const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {

2062 AtomicInfo Atomics(*this, LVal);

2063 Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);

2064}

2065

2067 AtomicInfo atomics(*this, dest);

2068

2069 switch (atomics.getEvaluationKind()) {

2072 atomics.emitCopyIntoMemory(RValue::get(value));

2073 return;

2074 }

2075

2079 return;

2080 }

2081

2083

2084

2085 bool Zeroed = false;

2087 Zeroed = atomics.emitMemSetZeroIfNecessary();

2088 dest = atomics.projectValue();

2089 }

2090

2091

2097

2099 return;

2100 }

2101 }

2102 llvm_unreachable("bad evaluation kind");

2103}

static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type, uint64_t expectedSize)

Does a store of the given IR type modify the full expected width?

static llvm::Value * EmitPostAtomicMinMax(CGBuilderTy &Builder, AtomicExpr::AtomicOp Op, bool IsSigned, llvm::Value *OldVal, llvm::Value *RHS)

Duplicate the atomic min/max operation in conventional IR for the builtin variants that return the new rather than the original value.

static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal, const llvm::function_ref< RValue(RValue)> &UpdateOp, Address DesiredAddr)

static Address EmitValToTemp(CodeGenFunction &CGF, Expr *E)

static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest, Address Ptr, Address Val1, Address Val2, llvm::Value *IsWeak, llvm::Value *FailureOrder, uint64_t Size, llvm::AtomicOrdering Order, llvm::SyncScope::ID Scope)

static RValue emitAtomicLibcall(CodeGenFunction &CGF, StringRef fnName, QualType resultType, CallArgList &args)

static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr, Address Val1, Address Val2, llvm::Value *FailureOrderVal, uint64_t Size, llvm::AtomicOrdering SuccessOrder, llvm::SyncScope::ID Scope)

Given an ordering required on success, emit all possible cmpxchg instructions to cope with the provided (but possibly only dynamically known) FailureOrder.

static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr, Address Val1, Address Val2, uint64_t Size, llvm::AtomicOrdering SuccessOrder, llvm::AtomicOrdering FailureOrder, llvm::SyncScope::ID Scope)

static bool shouldCastToInt(llvm::Type *ValTy, bool CmpXchg)

Return true if ValTy is a type that should be cast to an integer around the atomic memory operation.

CodeGenFunction::ComplexPairTy ComplexPairTy

static QualType getPointeeType(const MemRegion *R)

Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic analysis of a program.

CanQualType getSizeType() const

Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.

TypeInfoChars getTypeInfoInChars(const Type *T) const

int64_t toBits(CharUnits CharSize) const

Convert a size in characters to a size in bits.

CharUnits getTypeSizeInChars(QualType T) const

Return the size of the specified (complete) type T, in characters.

QualType getExtVectorType(QualType VectorType, unsigned NumElts) const

Return the unique reference to an extended vector type of the specified element type and size.

CharUnits toCharUnitsFromBits(int64_t BitSize) const

Convert a size in bits to a size in characters.

unsigned getTargetAddressSpace(LangAS AS) const

AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load, __atomic_store, and __atomic_compare_exchange_*, for the similarly-named C++11 instructions, and __c11 variants for <stdatomic.h>, and corresponding __opencl_atomic_* for OpenCL 2.0.

CharUnits - This is an opaque type for sizes expressed in character units.

bool isZero() const

isZero - Test whether the quantity equals zero.

llvm::Align getAsAlign() const

getAsAlign - Returns Quantity as a valid llvm::Align. Beware: llvm::Align assumes power of two 8-bit bytes.

QuantityType getQuantity() const

getQuantity - Get the raw integer representation of this quantity.

Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this class is possibly signed.

llvm::Value * emitRawPointer(CodeGenFunction &CGF) const

Return the pointer contained in this class after authenticating it and adding offset to it if necessary.

CharUnits getAlignment() const

llvm::Type * getElementType() const

Return the type of the values stored in this address.

Address withElementType(llvm::Type *ElemTy) const

Return address with different element type, but same pointer and alignment.

static AggValueSlot ignored()

ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.

Address getAddress() const

static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)

llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)

Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")

llvm::AtomicRMWInst * CreateAtomicRMW(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Ordering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)

llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)

Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")

llvm::AtomicCmpXchgInst * CreateAtomicCmpXchg(Address Addr, llvm::Value *Cmp, llvm::Value *New, llvm::AtomicOrdering SuccessOrdering, llvm::AtomicOrdering FailureOrdering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)

llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")

llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)

Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")

static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())

CGFunctionInfo - Class to encapsulate the information about a function definition.

CallArgList - Type for representing both the value and type of arguments in a call.

void add(RValue rvalue, QualType type)

CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code.

llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)

EmitFromMemory - Change a scalar value from its memory representation to its value representation.

std::pair< RValue, llvm::Value * > EmitAtomicCompareExchange(LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success=llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering Failure=llvm::AtomicOrdering::SequentiallyConsistent, bool IsWeak=false, AggValueSlot Slot=AggValueSlot::ignored())

static TypeEvaluationKind getEvaluationKind(QualType T)

getEvaluationKind - Return the TypeEvaluationKind of QualType T.
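The returned kind drives a three-way dispatch that recurs throughout atomic lowering; a minimal sketch:

#include "CodeGenFunction.h"  // CodeGen-internal header

using namespace clang::CodeGen;

// Illustrative dispatch on how values of Ty are represented.
static void dispatchOnKind(clang::QualType Ty) {
  switch (CodeGenFunction::getEvaluationKind(Ty)) {
  case TEK_Scalar:
    // One llvm::Value; loads and stores are direct.
    break;
  case TEK_Complex:
    // A (real, imag) pair of llvm::Values.
    break;
  case TEK_Aggregate:
    // The value lives in memory; work through an Address/AggValueSlot.
    break;
  }
}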

void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)

EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guaranteed to have the same type.

RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())

bool hasVolatileMember(QualType T)

hasVolatileMember - returns true if aggregate type has a volatile member.

llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)

createBasicBlock - Create an LLVM basic block.

void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO, const llvm::function_ref< RValue(RValue)> &UpdateOp, bool IsVolatile)

const LangOptions & getLangOpts() const

void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)

EmitBlock - Emit the given block.

ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)

EmitComplexExpr - Emit the computation of the specified expression of complex type, returning the result.

RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)

EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address of the lvalue, then loads the result as an rvalue, returning the rvalue.

RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)

void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)

EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory location.

llvm::Type * ConvertTypeForMem(QualType T)

RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)

CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment, and cast it to the default address space.

void EmitAtomicInit(Expr *E, LValue lvalue)

const TargetInfo & getTarget() const

llvm::Value * getTypeSize(QualType Ty)

Returns the calculated size of the specified type.

Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)

EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our best estimate of the alignment of the pointee.

RValue EmitLoadOfExtVectorElementLValue(LValue V)

void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)

EmitAggregateCopy - Emit an aggregate copy.

const TargetCodeGenInfo & getTargetHooks() const

void EmitAggExpr(const Expr *E, AggValueSlot AS)

EmitAggExpr - Emit the computation of the specified expression of aggregate type.

llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)

EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
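EmitToMemory and EmitFromMemory are inverses around loads and stores. The classic case is bool, whose value form is i1 but whose in-memory form is i8; a hedged sketch:

#include "CodeGenFunction.h"  // CodeGen-internal header

using namespace clang::CodeGen;

// Illustrative: round-trip a C++ bool through its memory representation.
static llvm::Value *roundTripBool(CodeGenFunction &CGF, llvm::Value *BoolVal) {
  clang::QualType BoolTy = CGF.getContext().BoolTy;
  llvm::Value *InMem = CGF.EmitToMemory(BoolVal, BoolTy);  // i1 -> i8
  return CGF.EmitFromMemory(InMem, BoolTy);                // i8 -> i1
}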

RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)

EmitCall - Generate a call of the given function, expecting the given result type, and using the given argument list which specifies both the LLVM arguments and the types they were derived from.

ASTContext & getContext() const

LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)

void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)

EmitStoreOfComplex - Store a complex number into the specified l-value.

void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)

llvm::AtomicRMWInst * emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Order=llvm::AtomicOrdering::SequentiallyConsistent, llvm::SyncScope::ID SSID=llvm::SyncScope::System, const AtomicExpr *AE=nullptr)

Emit an atomicrmw instruction, applying relevant metadata when applicable.

llvm::LLVMContext & getLLVMContext()

llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)

EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type, returning the result.

bool LValueIsSuitableForInlineAtomic(LValue Src)

void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)

EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from its value representation to its in-memory representation.

RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc)

This class organizes the cross-function state that is used while generating LLVM code.

llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)

Create or return a runtime function declaration with the specified type and name.

DiagnosticsEngine & getDiags() const

const LangOptions & getLangOpts() const

CodeGenTypes & getTypes()

const llvm::DataLayout & getDataLayout() const

void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)

DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.

llvm::LLVMContext & getLLVMContext()

llvm::ConstantInt * getSize(CharUnits numChars)

Emit the given number of characters as a value of type size_t.

llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)

GetFunctionType - Get the LLVM function type for the given CGFunctionInfo.

const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)

LValue - This represents a reference to an lvalue.

llvm::Value * getRawExtVectorPointer(CodeGenFunction &CGF) const

llvm::Constant * getExtVectorElts() const

void setAlignment(CharUnits A)

bool isVolatileQualified() const

llvm::Value * getRawBitFieldPointer(CodeGenFunction &CGF) const

CharUnits getAlignment() const

static LValue MakeExtVectorElt(Address Addr, llvm::Constant *Elts, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)

Address getAddress() const

llvm::Value * getRawVectorPointer(CodeGenFunction &CGF) const

bool isExtVectorElt() const

llvm::Value * getVectorIdx() const

LValueBaseInfo getBaseInfo() const

const CGBitFieldInfo & getBitFieldInfo() const

TBAAAccessInfo getTBAAInfo() const

Address getVectorAddress() const

static LValue MakeBitfield(Address Addr, const CGBitFieldInfo &Info, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)

Create a new object to represent a bit-field access.

llvm::Value * emitRawPointer(CodeGenFunction &CGF) const

static LValue MakeVectorElt(Address vecAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)

Address getExtVectorAddress() const

Address getBitFieldAddress() const

RValue - This trivial value class is used to represent the result of an expression that is evaluated.

static RValue get(llvm::Value *V)

static RValue getAggregate(Address addr, bool isVolatile=false)

Convert an Address to an RValue.

static RValue getComplex(llvm::Value *V1, llvm::Value *V2)

Address getAggregateAddress() const

getAggregateAddress() - Return the address of the aggregate.

llvm::Value * getScalarVal() const

getScalarVal() - Return the Value* of this scalar value.

bool isVolatileQualified() const

std::pair< llvm::Value *, llvm::Value * > getComplexVal() const

getComplexVal - Return the real/imag components of this complex value.
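The three RValue flavors and their accessors in one illustrative sketch (all operands assumed given):

#include "CodeGenFunction.h"  // CodeGen-internal header

using namespace clang::CodeGen;

// Illustrative: build each RValue flavor, then read it back.
static void rvalueFlavors(llvm::Value *ScalarV, llvm::Value *RealV,
                          llvm::Value *ImagV, Address TmpAddr) {
  RValue S = RValue::get(ScalarV);              // a single SSA value
  RValue C = RValue::getComplex(RealV, ImagV);  // a (real, imag) pair
  RValue A = RValue::getAggregate(TmpAddr);     // an aggregate in memory

  llvm::Value *V = S.getScalarVal();
  std::pair<llvm::Value *, llvm::Value *> RI = C.getComplexVal();
  Address AggAddr = A.getAggregateAddress();
  (void)V; (void)RI; (void)AggAddr;
}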

ReturnValueSlot - Contains the address where the return value of a function can be stored.

Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const

virtual llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts, SyncScope Scope, llvm::AtomicOrdering Ordering, llvm::LLVMContext &Ctx) const

Get the syncscope used in LLVM IR.

virtual void setTargetAtomicMetadata(CodeGenFunction &CGF, llvm::Instruction &AtomicInst, const AtomicExpr *Expr=nullptr) const

Allow the target to apply other metadata to an atomic instruction.

Concrete class used by the front-end to report problems and issues.

DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)

Issue the message to the client.

This represents one expression.

SourceLocation getExprLoc() const LLVM_READONLY

getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic expression.

PointerType - C99 6.7.5.1 - Pointer Declarators.

A (possibly-)qualified type.

bool isNull() const

Return true if this QualType doesn't point to a type yet.

LangAS getAddressSpace() const

Return the address space of this type.

Qualifiers getQualifiers() const

Retrieve the set of qualifiers applied to this type.

QualType getUnqualifiedType() const

Retrieve the unqualified variant of the given type, removing as little sugar as possible.

Scope - A scope is a transient data structure that is used while parsing the program.

Encodes a location in the source.

SourceLocation getBeginLoc() const LLVM_READONLY

unsigned getMaxAtomicInlineWidth() const

Return the maximum width lock-free atomic operation which can be inlined given the supported features of the given target.

bool isPointerType() const

const T * castAs() const

Member-template castAs<specific type>.

QualType getPointeeType() const

If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.

bool isAtomicType() const

bool isFloatingType() const

const T * getAs() const

Member-template getAs<specific type>.
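Composed, these queries peel a type the way atomic lowering needs; a hedged sketch with an illustrative helper name:

#include "clang/AST/Type.h"

// Illustrative: strip _Atomic(...) sugar, then look through one pointer level.
static clang::QualType peelForAtomics(clang::QualType Ty) {
  if (const auto *AT = Ty->getAs<clang::AtomicType>())
    Ty = AT->getValueType();  // _Atomic(T) -> T
  if (Ty->isPointerType())
    return Ty->castAs<clang::PointerType>()->getPointeeType();
  return Ty.getUnqualifiedType();
}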

Represents a GCC generic vector type.

TypeEvaluationKind

The kind of evaluation to perform on values of a particular type.

const internal::VariadicAllOfMatcher< Type > type

Matches Types in the clang AST.

bool Load(InterpState &S, CodePtr OpPC)

The JSON file list parser is used to communicate input to InstallAPI.

llvm::StringRef getAsString(SyncScope S)

@ Success

Template argument deduction was successful.

Structure with information about how a bitfield should be accessed.

CharUnits StorageOffset

The offset of the bitfield storage from the start of the struct.

unsigned Offset

The offset within a contiguous run of bitfields that are represented as a single "field" within the LLVM struct type.

unsigned Size

The total size of the bit-field, in bits.

unsigned StorageSize

The storage size in bits which should be used when accessing this bitfield.
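Read together, the four fields describe one access: load StorageSize bits at StorageOffset, shift right by Offset, keep the low Size bits. A C-level sketch of the unsigned, little-endian case, illustrative rather than the CodeGen implementation:

#include "CGRecordLayout.h"  // CodeGen-internal header for CGBitFieldInfo
#include <cstdint>
#include <cstring>

// Illustrative only; assumes Size < 64 and a little-endian target.
static uint64_t loadBitField(const unsigned char *StructBase,
                             const clang::CodeGen::CGBitFieldInfo &BFI) {
  uint64_t Storage = 0;
  std::memcpy(&Storage, StructBase + BFI.StorageOffset.getQuantity(),
              BFI.StorageSize / 8);                   // whole storage unit
  Storage >>= BFI.Offset;                             // drop lower neighbors
  return Storage & ((uint64_t(1) << BFI.Size) - 1);   // mask to field width
}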

llvm::PointerType * VoidPtrTy

llvm::IntegerType * Int8Ty

i8, i16, i32, and i64

llvm::IntegerType * SizeTy

llvm::IntegerType * IntTy

int

llvm::PointerType * UnqualPtrTy