clang: lib/CodeGen/CGAtomic.cpp Source File

1//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ---------------===//

2//

3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.

4// See https://llvm.org/LICENSE.txt for license information.

5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

6//

7//===----------------------------------------------------------------------===//

8//

9//  This file contains the code for emitting atomic operations.

10//

11//===----------------------------------------------------------------------===//

12

21#include "llvm/ADT/DenseMap.h"

22#include "llvm/IR/DataLayout.h"

23#include "llvm/IR/Intrinsics.h"

24

25using namespace clang;

26using namespace CodeGen;

27

28namespace {

29 class AtomicInfo {

38 bool UseLibcall;

41 public:

43 : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),

48 AtomicTy = lvalue.getType();

50 ValueTy = ATy->getValueType();

51 else

52 ValueTy = AtomicTy;

54

57 TypeInfo ValueTI = C.getTypeInfo(ValueTy);

58 ValueSizeInBits = ValueTI.Width;

59 ValueAlignInBits = ValueTI.Align;

60

61 TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);

62 AtomicSizeInBits = AtomicTI.Width;

63 AtomicAlignInBits = AtomicTI.Align;

64

65 assert(ValueSizeInBits <= AtomicSizeInBits);

66 assert(ValueAlignInBits <= AtomicAlignInBits);

67

68 AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);

69 ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);

72

73 LVal = lvalue;

75 ValueTy = lvalue.getType();

76 ValueSizeInBits = C.getTypeSize(ValueTy);

78 auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());

79 AtomicSizeInBits = C.toBits(

80 C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)

83 auto OffsetInChars =

84 (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *

86 llvm::Value *StoragePtr = CGF.Builder.CreateConstGEP1_64(

87 CGF.Int8Ty, BitFieldPtr, OffsetInChars.getQuantity());

89 StoragePtr, CGF.UnqualPtrTy, "atomic_bitfield_base");

90 BFI = OrigBFI;

94 llvm::Type *StorageTy = CGF.Builder.getIntNTy(AtomicSizeInBits);

95 LVal = LValue::MakeBitfield(

98 AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);

99 if (AtomicTy.isNull()) {

100 llvm::APInt Size(

101 32,

102 C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());

103 AtomicTy = C.getConstantArrayType(C.CharTy, Size, nullptr,

104 ArraySizeModifier::Normal,

105 0);

106 }

107 AtomicAlign = ValueAlign = lvalue.getAlignment();

110 ValueSizeInBits = C.getTypeSize(ValueTy);

111 AtomicTy = lvalue.getType();

112 AtomicSizeInBits = C.getTypeSize(AtomicTy);

113 AtomicAlign = ValueAlign = lvalue.getAlignment();

114 LVal = lvalue;

115 } else {

117 ValueTy = lvalue.getType();

118 ValueSizeInBits = C.getTypeSize(ValueTy);

120 lvalue.getType(), cast<llvm::FixedVectorType>(

122 ->getNumElements());

123 AtomicSizeInBits = C.getTypeSize(AtomicTy);

124 AtomicAlign = ValueAlign = lvalue.getAlignment();

125 LVal = lvalue;

126 }

127 UseLibcall = C.getTargetInfo().hasBuiltinAtomic(

128 AtomicSizeInBits, C.toBits(lvalue.getAlignment()));

129 }

130

131 QualType getAtomicType() const { return AtomicTy; }

132 QualType getValueType() const { return ValueTy; }

133 CharUnits getAtomicAlignment() const { return AtomicAlign; }

134 uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }

135 uint64_t getValueSizeInBits() const { return ValueSizeInBits; }

136 TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }

137 bool shouldUseLibcall() const { return UseLibcall; }

138 const LValue &getAtomicLValue() const { return LVal; }

139 llvm::Value *getAtomicPointer() const {

148 }

149 Address getAtomicAddress() const {

150 llvm::Type *ElTy;

157 else

159 return Address(getAtomicPointer(), ElTy, getAtomicAlignment());

160 }

161

162 Address getAtomicAddressAsAtomicIntPointer() const {

163 return castToAtomicIntPointer(getAtomicAddress());

164 }

165

166

167

168

169

170

171

172 bool hasPadding() const {

173 return (ValueSizeInBits != AtomicSizeInBits);

174 }

175

176 bool emitMemSetZeroIfNecessary() const;

177

178 llvm::Value *getAtomicSizeValue() const {

181 }

182

183

184

186

187

188

189

190 Address convertToAtomicIntPointer(Address Addr) const;

191

192

195

196 llvm::Value *getScalarRValValueOrNull(RValue RVal) const;

197

198

199 llvm::Value *convertRValueToInt(RValue RVal, bool CmpXchg = false) const;

200

201 RValue ConvertToValueOrAtomic(llvm::Value *IntVal, AggValueSlot ResultSlot,

203 bool CmpXchg = false) const;

204

205

206 void emitCopyIntoMemory(RValue rvalue) const;

207

208

209 LValue projectValue() const {

211 Address addr = getAtomicAddress();

212 if (hasPadding())

214

215 return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),

217 }

218

219

220

222 bool AsValue, llvm::AtomicOrdering AO,

223 bool IsVolatile);

224

225

226

227

228

229

230

231

232

233 std::pair<RValue, llvm::Value *>

235 llvm::AtomicOrdering Success =

236 llvm::AtomicOrdering::SequentiallyConsistent,

237 llvm::AtomicOrdering Failure =

238 llvm::AtomicOrdering::SequentiallyConsistent,

239 bool IsWeak = false);

240

241

242

243

244 void EmitAtomicUpdate(llvm::AtomicOrdering AO,

245 const llvm::function_ref<RValue(RValue)> &UpdateOp,

246 bool IsVolatile);

247

248

249 void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,

250 bool IsVolatile);

251

252

254

255

256 Address CreateTempAlloca() const;

257 private:

258 bool requiresMemSetZero(llvm::Type *type) const;

259

260

261

262 void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,

263 llvm::AtomicOrdering AO, bool IsVolatile);

264

265 llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile,

266 bool CmpXchg = false);

267

268 llvm::Value *EmitAtomicCompareExchangeLibcall(

269 llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,

270 llvm::AtomicOrdering Success =

271 llvm::AtomicOrdering::SequentiallyConsistent,

272 llvm::AtomicOrdering Failure =

273 llvm::AtomicOrdering::SequentiallyConsistent);

274

275 std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(

276 llvm::Value *ExpectedVal, llvm::Value *DesiredVal,

277 llvm::AtomicOrdering Success =

278 llvm::AtomicOrdering::SequentiallyConsistent,

279 llvm::AtomicOrdering Failure =

280 llvm::AtomicOrdering::SequentiallyConsistent,

281 bool IsWeak = false);

282

283 void

284 EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,

285 const llvm::function_ref<RValue(RValue)> &UpdateOp,

286 bool IsVolatile);

287

288 void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,

289 const llvm::function_ref<RValue(RValue)> &UpdateOp,

290 bool IsVolatile);

291

292 void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,

293 bool IsVolatile);

294

295 void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRal,

296 bool IsVolatile);

297 };

298}

299

300Address AtomicInfo::CreateTempAlloca() const {

302 (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy

303 : AtomicTy,

304 getAtomicAlignment(),

305 "atomic-temp");

306

309 TempAlloca, getAtomicAddress().getType(),

310 getAtomicAddress().getElementType());

311 return TempAlloca;

312}

313

315 StringRef fnName,

322 fnAttrB.addAttribute(llvm::Attribute::NoUnwind);

323 fnAttrB.addAttribute(llvm::Attribute::WillReturn);

324 llvm::AttributeList fnAttrs = llvm::AttributeList::get(

325 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);

326

327 llvm::FunctionCallee fn =

331}

332

333

335 uint64_t expectedSize) {

336 return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);

337}

338

339

340

341

342bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {

343

344 if (hasPadding()) return true;

345

346

347 switch (getEvaluationKind()) {

348

349

354 AtomicSizeInBits / 2);

355

356

358 return false;

359 }

360 llvm_unreachable("bad evaluation kind");

361}
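
Editorial aside, not part of the listing: the test above decides when a temporary must be zero-filled before a value is copied in, so padding bits carry a deterministic pattern when later compared bit-for-bit (e.g. inside a cmpxchg loop). A standalone restatement under our own names, not clang API:

#include <cstdint>

enum class TypeKind { Scalar, Complex, Aggregate };

// Zero-init is needed whenever the stored value cannot cover the whole
// atomic width: any size padding, a scalar narrower than the atomic slot,
// or complex components that don't each fill half of it. Aggregate padding
// is left alone (undefined bit pattern, as the source warns).
static bool needsMemSetZero(bool hasPadding, TypeKind k,
                            uint64_t storeBits, uint64_t atomicBits) {
  if (hasPadding)
    return true;
  switch (k) {
  case TypeKind::Scalar:    return storeBits != atomicBits;
  case TypeKind::Complex:   return storeBits != atomicBits / 2;
  case TypeKind::Aggregate: return false;
  }
  return false;
}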

362

363bool AtomicInfo::emitMemSetZeroIfNecessary() const {

367 return false;

368

373 return true;

374}

375

379 uint64_t Size,

380 llvm::AtomicOrdering SuccessOrder,

381 llvm::AtomicOrdering FailureOrder,

382 llvm::SyncScope::ID Scope) {

383

386

388 Ptr, Expected, Desired, SuccessOrder, FailureOrder, Scope);

389 Pair->setVolatile(E->isVolatile());

390 Pair->setWeak(IsWeak);

392

393

394

395 llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);

396 llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

397

398

399

400 llvm::BasicBlock *StoreExpectedBB =

402

403

404

405 llvm::BasicBlock *ContinueBB =

407

408

409

410 CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

411

412 CGF.Builder.SetInsertPoint(StoreExpectedBB);

413

415

416 CGF.Builder.CreateBr(ContinueBB);

417

418 CGF.Builder.SetInsertPoint(ContinueBB);

419

421}

422

423

424

425

429 llvm::Value *FailureOrderVal,

430 uint64_t Size,

431 llvm::AtomicOrdering SuccessOrder,

432 llvm::SyncScope::ID Scope) {

433 llvm::AtomicOrdering FailureOrder;

434 if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {

435 auto FOS = FO->getSExtValue();

436 if (!llvm::isValidAtomicOrderingCABI(FOS))

437 FailureOrder = llvm::AtomicOrdering::Monotonic;

438 else

439 switch ((llvm::AtomicOrderingCABI)FOS) {

440 case llvm::AtomicOrderingCABI::relaxed:

441

442

443 case llvm::AtomicOrderingCABI::release:

444 case llvm::AtomicOrderingCABI::acq_rel:

445 FailureOrder = llvm::AtomicOrdering::Monotonic;

446 break;

447 case llvm::AtomicOrderingCABI::consume:

448 case llvm::AtomicOrderingCABI::acquire:

449 FailureOrder = llvm::AtomicOrdering::Acquire;

450 break;

451 case llvm::AtomicOrderingCABI::seq_cst:

452 FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;

453 break;

454 }

455

456

457

458

459 emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,

460 FailureOrder, Scope);

461 return;

462 }

463

464

469

470

471

472

473 llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

474

475 SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),

476 AcquireBB);

477 SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),

478 AcquireBB);

479 SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),

480 SeqCstBB);

481

482

483 CGF.Builder.SetInsertPoint(MonotonicBB);

485 Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);

486 CGF.Builder.CreateBr(ContBB);

487

488 CGF.Builder.SetInsertPoint(AcquireBB);

489 emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,

490 llvm::AtomicOrdering::Acquire, Scope);

491 CGF.Builder.CreateBr(ContBB);

492

493 CGF.Builder.SetInsertPoint(SeqCstBB);

494 emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,

495 llvm::AtomicOrdering::SequentiallyConsistent, Scope);

496 CGF.Builder.CreateBr(ContBB);

497

498 CGF.Builder.SetInsertPoint(ContBB);

499}
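
Editorial aside: for a constant failure order, the function above clamps the C ABI value as follows; release flavors are invalid on the failure path and degrade to relaxed, while consume is strengthened to acquire. A minimal sketch with our own enum (values match the __ATOMIC_* ABI constants):

// C ABI memory-order constants, in ABI order.
enum FailOrder { Relaxed = 0, Consume, Acquire, Release, AcqRel, SeqCst };

static FailOrder clampFailureOrder(FailOrder fo) {
  switch (fo) {
  case Relaxed:
  case Release:
  case AcqRel:  return Relaxed; // no release semantics on failure
  case Consume:
  case Acquire: return Acquire;
  case SeqCst:  return SeqCst;
  }
  return Relaxed; // out-of-range runtime values fall back to relaxed
}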

500

501

502

505 bool IsSigned,

506 llvm::Value *OldVal,

507 llvm::Value *RHS) {

508 llvm::CmpInst::Predicate Pred;

509 switch (Op) {

510 default:

511 llvm_unreachable("Unexpected min/max operation");

512 case AtomicExpr::AO__atomic_max_fetch:

513 case AtomicExpr::AO__scoped_atomic_max_fetch:

514 Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;

515 break;

516 case AtomicExpr::AO__atomic_min_fetch:

517 case AtomicExpr::AO__scoped_atomic_min_fetch:

518 Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;

519 break;

520 }

521 llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");

522 return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");

523}
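
Editorial aside: atomicrmw min/max returns the value that was in memory before the operation, so the *_fetch builtins above must recompute the stored result. The icmp+select pair is equivalent to this plain C++ (our names, not clang API):

#include <cstdint>

static uint64_t postMinMax(bool isMax, bool isSigned, uint64_t oldVal,
                           uint64_t rhs) {
  bool keepOld;
  if (isSigned)
    keepOld = isMax ? (int64_t)oldVal > (int64_t)rhs
                    : (int64_t)oldVal < (int64_t)rhs;
  else
    keepOld = isMax ? oldVal > rhs : oldVal < rhs;
  return keepOld ? oldVal : rhs; // mirrors the "tst"/"newval" icmp+select
}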

524

527 llvm::Value *IsWeak, llvm::Value *FailureOrder,

528 uint64_t Size, llvm::AtomicOrdering Order,

529 llvm::SyncScope::ID Scope) {

530 llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;

531 bool PostOpMinMax = false;

532 unsigned PostOp = 0;

533

534 switch (E->getOp()) {

535 case AtomicExpr::AO__c11_atomic_init:

536 case AtomicExpr::AO__opencl_atomic_init:

537 llvm_unreachable("Already handled!");

538

539 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:

540 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:

541 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:

543 FailureOrder, Size, Order, Scope);

544 return;

545 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:

546 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:

547 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:

549 FailureOrder, Size, Order, Scope);

550 return;

551 case AtomicExpr::AO__atomic_compare_exchange:

552 case AtomicExpr::AO__atomic_compare_exchange_n:

553 case AtomicExpr::AO__scoped_atomic_compare_exchange:

554 case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {

555 if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {

557 Val1, Val2, FailureOrder, Size, Order, Scope);

558 } else {

559

560 llvm::BasicBlock *StrongBB =

563 llvm::BasicBlock *ContBB =

565

566 llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);

567 SI->addCase(CGF.Builder.getInt1(false), StrongBB);

568

569 CGF.Builder.SetInsertPoint(StrongBB);

571 FailureOrder, Size, Order, Scope);

572 CGF.Builder.CreateBr(ContBB);

573

574 CGF.Builder.SetInsertPoint(WeakBB);

576 FailureOrder, Size, Order, Scope);

577 CGF.Builder.CreateBr(ContBB);

578

579 CGF.Builder.SetInsertPoint(ContBB);

580 }

581 return;

582 }

583 case AtomicExpr::AO__c11_atomic_load:

584 case AtomicExpr::AO__opencl_atomic_load:

585 case AtomicExpr::AO__hip_atomic_load:

586 case AtomicExpr::AO__atomic_load_n:

587 case AtomicExpr::AO__atomic_load:

588 case AtomicExpr::AO__scoped_atomic_load_n:

589 case AtomicExpr::AO__scoped_atomic_load: {

591 Load->setAtomic(Order, Scope);

592 Load->setVolatile(E->isVolatile());

594 return;

595 }

596

597 case AtomicExpr::AO__c11_atomic_store:

598 case AtomicExpr::AO__opencl_atomic_store:

599 case AtomicExpr::AO__hip_atomic_store:

600 case AtomicExpr::AO__atomic_store:

601 case AtomicExpr::AO__atomic_store_n:

602 case AtomicExpr::AO__scoped_atomic_store:

603 case AtomicExpr::AO__scoped_atomic_store_n: {

606 Store->setAtomic(Order, Scope);

607 Store->setVolatile(E->isVolatile());

608 return;

609 }

610

611 case AtomicExpr::AO__c11_atomic_exchange:

612 case AtomicExpr::AO__hip_atomic_exchange:

613 case AtomicExpr::AO__opencl_atomic_exchange:

614 case AtomicExpr::AO__atomic_exchange_n:

615 case AtomicExpr::AO__atomic_exchange:

616 case AtomicExpr::AO__scoped_atomic_exchange_n:

617 case AtomicExpr::AO__scoped_atomic_exchange:

618 Op = llvm::AtomicRMWInst::Xchg;

619 break;

620

621 case AtomicExpr::AO__atomic_add_fetch:

622 case AtomicExpr::AO__scoped_atomic_add_fetch:

623 PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd

624 : llvm::Instruction::Add;

625 [[fallthrough]];

626 case AtomicExpr::AO__c11_atomic_fetch_add:

627 case AtomicExpr::AO__hip_atomic_fetch_add:

628 case AtomicExpr::AO__opencl_atomic_fetch_add:

629 case AtomicExpr::AO__atomic_fetch_add:

630 case AtomicExpr::AO__scoped_atomic_fetch_add:

631 Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd

632 : llvm::AtomicRMWInst::Add;

633 break;

634

635 case AtomicExpr::AO__atomic_sub_fetch:

636 case AtomicExpr::AO__scoped_atomic_sub_fetch:

637 PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub

638 : llvm::Instruction::Sub;

639 [[fallthrough]];

640 case AtomicExpr::AO__c11_atomic_fetch_sub:

641 case AtomicExpr::AO__hip_atomic_fetch_sub:

642 case AtomicExpr::AO__opencl_atomic_fetch_sub:

643 case AtomicExpr::AO__atomic_fetch_sub:

644 case AtomicExpr::AO__scoped_atomic_fetch_sub:

645 Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub

646 : llvm::AtomicRMWInst::Sub;

647 break;

648

649 case AtomicExpr::AO__atomic_min_fetch:

650 case AtomicExpr::AO__scoped_atomic_min_fetch:

651 PostOpMinMax = true;

652 [[fallthrough]];

653 case AtomicExpr::AO__c11_atomic_fetch_min:

654 case AtomicExpr::AO__hip_atomic_fetch_min:

655 case AtomicExpr::AO__opencl_atomic_fetch_min:

656 case AtomicExpr::AO__atomic_fetch_min:

657 case AtomicExpr::AO__scoped_atomic_fetch_min:

658 Op = E->getValueType()->isFloatingType()

659 ? llvm::AtomicRMWInst::FMin

660 : (E->getValueType()->isSignedIntegerType()

661 ? llvm::AtomicRMWInst::Min

662 : llvm::AtomicRMWInst::UMin);

663 break;

664

665 case AtomicExpr::AO__atomic_max_fetch:

666 case AtomicExpr::AO__scoped_atomic_max_fetch:

667 PostOpMinMax = true;

668 [[fallthrough]];

669 case AtomicExpr::AO__c11_atomic_fetch_max:

670 case AtomicExpr::AO__hip_atomic_fetch_max:

671 case AtomicExpr::AO__opencl_atomic_fetch_max:

672 case AtomicExpr::AO__atomic_fetch_max:

673 case AtomicExpr::AO__scoped_atomic_fetch_max:

674 Op = E->getValueType()->isFloatingType()

675 ? llvm::AtomicRMWInst::FMax

676 : (E->getValueType()->isSignedIntegerType()

677 ? llvm::AtomicRMWInst::Max

678 : llvm::AtomicRMWInst::UMax);

679 break;

680

681 case AtomicExpr::AO__atomic_and_fetch:

682 case AtomicExpr::AO__scoped_atomic_and_fetch:

683 PostOp = llvm::Instruction::And;

684 [[fallthrough]];

685 case AtomicExpr::AO__c11_atomic_fetch_and:

686 case AtomicExpr::AO__hip_atomic_fetch_and:

687 case AtomicExpr::AO__opencl_atomic_fetch_and:

688 case AtomicExpr::AO__atomic_fetch_and:

689 case AtomicExpr::AO__scoped_atomic_fetch_and:

690 Op = llvm::AtomicRMWInst::And;

691 break;

692

693 case AtomicExpr::AO__atomic_or_fetch:

694 case AtomicExpr::AO__scoped_atomic_or_fetch:

695 PostOp = llvm::Instruction::Or;

696 [[fallthrough]];

697 case AtomicExpr::AO__c11_atomic_fetch_or:

698 case AtomicExpr::AO__hip_atomic_fetch_or:

699 case AtomicExpr::AO__opencl_atomic_fetch_or:

700 case AtomicExpr::AO__atomic_fetch_or:

701 case AtomicExpr::AO__scoped_atomic_fetch_or:

702 Op = llvm::AtomicRMWInst::Or;

703 break;

704

705 case AtomicExpr::AO__atomic_xor_fetch:

706 case AtomicExpr::AO__scoped_atomic_xor_fetch:

707 PostOp = llvm::Instruction::Xor;

708 [[fallthrough]];

709 case AtomicExpr::AO__c11_atomic_fetch_xor:

710 case AtomicExpr::AO__hip_atomic_fetch_xor:

711 case AtomicExpr::AO__opencl_atomic_fetch_xor:

712 case AtomicExpr::AO__atomic_fetch_xor:

713 case AtomicExpr::AO__scoped_atomic_fetch_xor:

714 Op = llvm::AtomicRMWInst::Xor;

715 break;

716

717 case AtomicExpr::AO__atomic_nand_fetch:

718 case AtomicExpr::AO__scoped_atomic_nand_fetch:

719 PostOp = llvm::Instruction::And;

720 [[fallthrough]];

721 case AtomicExpr::AO__c11_atomic_fetch_nand:

722 case AtomicExpr::AO__atomic_fetch_nand:

723 case AtomicExpr::AO__scoped_atomic_fetch_nand:

724 Op = llvm::AtomicRMWInst::Nand;

725 break;

726

727 case AtomicExpr::AO__atomic_test_and_set: {

728 llvm::AtomicRMWInst *RMWI =

731 RMWI->setVolatile(E->isVolatile());

732 llvm::Value *Result = CGF.Builder.CreateIsNotNull(RMWI, "tobool");

734 return;

735 }

736

737 case AtomicExpr::AO__atomic_clear: {

738 llvm::StoreInst *Store =

740 Store->setAtomic(Order, Scope);

741 Store->setVolatile(E->isVolatile());

742 return;

743 }

744 }

745

747 llvm::AtomicRMWInst *RMWI =

749 RMWI->setVolatile(E->isVolatile());

750

751

752

753 llvm::Value *Result = RMWI;

754 if (PostOpMinMax)

756 E->getValueType()->isSignedIntegerType(),

757 RMWI, LoadVal1);

758 else if (PostOp)

759 Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,

760 LoadVal1);

761 if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch ||

762 E->getOp() == AtomicExpr::AO__scoped_atomic_nand_fetch)

763 Result = CGF.Builder.CreateNot(Result);

765}
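
Editorial aside: the post-op fixups at the end of EmitAtomicOp exist because the *_fetch builtins return the new value while atomicrmw yields the old one, so the operation is replayed on the returned value, with an extra inversion for nand. For example:

// __atomic_nand_fetch: replay the And on the old value, then invert.
static unsigned nandFetchResult(unsigned oldVal, unsigned operand) {
  return ~(oldVal & operand);
}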

766

767

768

773 true);

774 return DeclPtr;

775}

776

779 llvm::Value *IsWeak, llvm::Value *FailureOrder,

780 uint64_t Size, llvm::AtomicOrdering Order,

781 llvm::Value *Scope) {

782 auto ScopeModel = Expr->getScopeModel();

783

784

785

786 if (!ScopeModel) {

787 llvm::SyncScope::ID SS;

789

790

791

792

794 SyncScope::OpenCLDevice,

796 else

797 SS = llvm::SyncScope::System;

798 EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,

799 Order, SS);

800 return;

801 }

802

803

804 if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {

806 CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),

808 EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,

809 Order, SCID);

810 return;

811 }

812

813

814 auto &Builder = CGF.Builder;

815 auto Scopes = ScopeModel->getRuntimeValues();

816 llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;

817 for (auto S : Scopes)

819

820 llvm::BasicBlock *ContBB =

822

823 auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);

824

825

826 auto FallBack = ScopeModel->getFallBackValue();

827 llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);

828 for (auto S : Scopes) {

829 auto *B = BB[S];

830 if (S != FallBack)

831 SI->addCase(Builder.getInt32(S), B);

832

833 Builder.SetInsertPoint(B);

834 EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,

835 Order,

837 ScopeModel->map(S),

838 Order,

840 Builder.CreateBr(ContBB);

841 }

842

843 Builder.SetInsertPoint(ContBB);

844}

845

850 MemTy = AT->getValueType();

851 llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;

852

857

858 if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||

859 E->getOp() == AtomicExpr::AO__opencl_atomic_init) {

863 }

864

868

872 bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;

873 bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;

874 if (Misaligned) {

876 << (int)TInfo.Width.getQuantity()

878 }

879 if (Oversized) {

881 << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();

882 }

883

885 llvm::Value *Scope =

886 E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;

887 bool ShouldCastToIntPtrTy = true;

888

889 switch (E->getOp()) {

890 case AtomicExpr::AO__c11_atomic_init:

891 case AtomicExpr::AO__opencl_atomic_init:

892 llvm_unreachable("Already handled above with EmitAtomicInit!");

893

894 case AtomicExpr::AO__atomic_load_n:

895 case AtomicExpr::AO__scoped_atomic_load_n:

896 case AtomicExpr::AO__c11_atomic_load:

897 case AtomicExpr::AO__opencl_atomic_load:

898 case AtomicExpr::AO__hip_atomic_load:

899 case AtomicExpr::AO__atomic_test_and_set:

900 case AtomicExpr::AO__atomic_clear:

901 break;

902

903 case AtomicExpr::AO__atomic_load:

904 case AtomicExpr::AO__scoped_atomic_load:

906 break;

907

908 case AtomicExpr::AO__atomic_store:

909 case AtomicExpr::AO__scoped_atomic_store:

911 break;

912

913 case AtomicExpr::AO__atomic_exchange:

914 case AtomicExpr::AO__scoped_atomic_exchange:

917 break;

918

919 case AtomicExpr::AO__atomic_compare_exchange:

920 case AtomicExpr::AO__atomic_compare_exchange_n:

921 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:

922 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:

923 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:

924 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:

925 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:

926 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:

927 case AtomicExpr::AO__scoped_atomic_compare_exchange:

928 case AtomicExpr::AO__scoped_atomic_compare_exchange_n:

930 if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||

931 E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)

933 else

936 if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||

937 E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||

938 E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||

939 E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)

941 break;

942

943 case AtomicExpr::AO__c11_atomic_fetch_add:

944 case AtomicExpr::AO__c11_atomic_fetch_sub:

945 case AtomicExpr::AO__hip_atomic_fetch_add:

946 case AtomicExpr::AO__hip_atomic_fetch_sub:

947 case AtomicExpr::AO__opencl_atomic_fetch_add:

948 case AtomicExpr::AO__opencl_atomic_fetch_sub:

950

951

952

953

958 Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));

960 Val1 = Temp;

962 break;

963 }

964 [[fallthrough]];

965 case AtomicExpr::AO__atomic_fetch_add:

966 case AtomicExpr::AO__atomic_fetch_max:

967 case AtomicExpr::AO__atomic_fetch_min:

968 case AtomicExpr::AO__atomic_fetch_sub:

969 case AtomicExpr::AO__atomic_add_fetch:

970 case AtomicExpr::AO__atomic_max_fetch:

971 case AtomicExpr::AO__atomic_min_fetch:

972 case AtomicExpr::AO__atomic_sub_fetch:

973 case AtomicExpr::AO__c11_atomic_fetch_max:

974 case AtomicExpr::AO__c11_atomic_fetch_min:

975 case AtomicExpr::AO__opencl_atomic_fetch_max:

976 case AtomicExpr::AO__opencl_atomic_fetch_min:

977 case AtomicExpr::AO__hip_atomic_fetch_max:

978 case AtomicExpr::AO__hip_atomic_fetch_min:

979 case AtomicExpr::AO__scoped_atomic_fetch_add:

980 case AtomicExpr::AO__scoped_atomic_fetch_max:

981 case AtomicExpr::AO__scoped_atomic_fetch_min:

982 case AtomicExpr::AO__scoped_atomic_fetch_sub:

983 case AtomicExpr::AO__scoped_atomic_add_fetch:

984 case AtomicExpr::AO__scoped_atomic_max_fetch:

985 case AtomicExpr::AO__scoped_atomic_min_fetch:

986 case AtomicExpr::AO__scoped_atomic_sub_fetch:

988 [[fallthrough]];

989

990 case AtomicExpr::AO__atomic_fetch_and:

991 case AtomicExpr::AO__atomic_fetch_nand:

992 case AtomicExpr::AO__atomic_fetch_or:

993 case AtomicExpr::AO__atomic_fetch_xor:

994 case AtomicExpr::AO__atomic_and_fetch:

995 case AtomicExpr::AO__atomic_nand_fetch:

996 case AtomicExpr::AO__atomic_or_fetch:

997 case AtomicExpr::AO__atomic_xor_fetch:

998 case AtomicExpr::AO__atomic_store_n:

999 case AtomicExpr::AO__atomic_exchange_n:

1000 case AtomicExpr::AO__c11_atomic_fetch_and:

1001 case AtomicExpr::AO__c11_atomic_fetch_nand:

1002 case AtomicExpr::AO__c11_atomic_fetch_or:

1003 case AtomicExpr::AO__c11_atomic_fetch_xor:

1004 case AtomicExpr::AO__c11_atomic_store:

1005 case AtomicExpr::AO__c11_atomic_exchange:

1006 case AtomicExpr::AO__hip_atomic_fetch_and:

1007 case AtomicExpr::AO__hip_atomic_fetch_or:

1008 case AtomicExpr::AO__hip_atomic_fetch_xor:

1009 case AtomicExpr::AO__hip_atomic_store:

1010 case AtomicExpr::AO__hip_atomic_exchange:

1011 case AtomicExpr::AO__opencl_atomic_fetch_and:

1012 case AtomicExpr::AO__opencl_atomic_fetch_or:

1013 case AtomicExpr::AO__opencl_atomic_fetch_xor:

1014 case AtomicExpr::AO__opencl_atomic_store:

1015 case AtomicExpr::AO__opencl_atomic_exchange:

1016 case AtomicExpr::AO__scoped_atomic_fetch_and:

1017 case AtomicExpr::AO__scoped_atomic_fetch_nand:

1018 case AtomicExpr::AO__scoped_atomic_fetch_or:

1019 case AtomicExpr::AO__scoped_atomic_fetch_xor:

1020 case AtomicExpr::AO__scoped_atomic_and_fetch:

1021 case AtomicExpr::AO__scoped_atomic_nand_fetch:

1022 case AtomicExpr::AO__scoped_atomic_or_fetch:

1023 case AtomicExpr::AO__scoped_atomic_xor_fetch:

1024 case AtomicExpr::AO__scoped_atomic_store_n:

1025 case AtomicExpr::AO__scoped_atomic_exchange_n:

1027 break;

1028 }

1029

1031

1032

1033

1034

1036 AtomicInfo Atomics(*this, AtomicVal);

1037

1038 if (ShouldCastToIntPtrTy) {

1039 Ptr = Atomics.castToAtomicIntPointer(Ptr);

1041 Val1 = Atomics.convertToAtomicIntPointer(Val1);

1043 Val2 = Atomics.convertToAtomicIntPointer(Val2);

1044 }

1046 if (ShouldCastToIntPtrTy)

1047 Dest = Atomics.castToAtomicIntPointer(Dest);

1048 } else if (E->isCmpXChg())

1051 Dest = Atomics.CreateTempAlloca();

1052 if (ShouldCastToIntPtrTy)

1053 Dest = Atomics.castToAtomicIntPointer(Dest);

1054 }

1055

1056 bool PowerOf2Size = (Size & (Size - 1)) == 0;

1057 bool UseLibcall = !PowerOf2Size || (Size > 16);
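
Editorial aside: the libcall decision above is purely size-based. A standalone restatement (size in bytes; our naming):

// Fall back to the generic __atomic_* libcalls whenever the operand size
// is not a power of two or exceeds 16 bytes; everything else is emitted
// inline and, where necessary, lowered further by the backend.
static bool useAtomicLibcall(unsigned long long sizeInBytes) {
  bool powerOf2 = (sizeInBytes & (sizeInBytes - 1)) == 0;
  return !powerOf2 || sizeInBytes > 16;
}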

1058

1059

1060

1061

1062

1063

1064

1065

1066

1067

1068 if (UseLibcall) {

1070

1073

1074

1075

1076

1077 auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {

1078 if (E->isOpenCL())

1079 return V;

1082 return V;

1084 auto *DestType = llvm::PointerType::get(getLLVMContext(), DestAS);

1085

1088 };

1089

1093

1094

1095 std::string LibCallName;

1097 bool HaveRetTy = false;

1098 switch (E->getOp()) {

1099 case AtomicExpr::AO__c11_atomic_init:

1100 case AtomicExpr::AO__opencl_atomic_init:

1101 llvm_unreachable("Already handled!");

1102

1103

1104

1105

1106

1107

1108 case AtomicExpr::AO__atomic_compare_exchange:

1109 case AtomicExpr::AO__atomic_compare_exchange_n:

1110 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:

1111 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:

1112 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:

1113 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:

1114 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:

1115 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:

1116 case AtomicExpr::AO__scoped_atomic_compare_exchange:

1117 case AtomicExpr::AO__scoped_atomic_compare_exchange_n:

1118 LibCallName = "__atomic_compare_exchange";

1120 HaveRetTy = true;

1128 Order = OrderFail;

1129 break;

1130

1131

1132 case AtomicExpr::AO__atomic_exchange:

1133 case AtomicExpr::AO__atomic_exchange_n:

1134 case AtomicExpr::AO__c11_atomic_exchange:

1135 case AtomicExpr::AO__hip_atomic_exchange:

1136 case AtomicExpr::AO__opencl_atomic_exchange:

1137 case AtomicExpr::AO__scoped_atomic_exchange:

1138 case AtomicExpr::AO__scoped_atomic_exchange_n:

1139 LibCallName = "__atomic_exchange";

1143 break;

1144

1145 case AtomicExpr::AO__atomic_store:

1146 case AtomicExpr::AO__atomic_store_n:

1147 case AtomicExpr::AO__c11_atomic_store:

1148 case AtomicExpr::AO__hip_atomic_store:

1149 case AtomicExpr::AO__opencl_atomic_store:

1150 case AtomicExpr::AO__scoped_atomic_store:

1151 case AtomicExpr::AO__scoped_atomic_store_n:

1152 LibCallName = "__atomic_store";

1154 HaveRetTy = true;

1158 break;

1159

1160 case AtomicExpr::AO__atomic_load:

1161 case AtomicExpr::AO__atomic_load_n:

1162 case AtomicExpr::AO__c11_atomic_load:

1163 case AtomicExpr::AO__hip_atomic_load:

1164 case AtomicExpr::AO__opencl_atomic_load:

1165 case AtomicExpr::AO__scoped_atomic_load:

1166 case AtomicExpr::AO__scoped_atomic_load_n:

1167 LibCallName = "__atomic_load";

1168 break;

1169 case AtomicExpr::AO__atomic_add_fetch:

1170 case AtomicExpr::AO__scoped_atomic_add_fetch:

1171 case AtomicExpr::AO__atomic_fetch_add:

1172 case AtomicExpr::AO__c11_atomic_fetch_add:

1173 case AtomicExpr::AO__hip_atomic_fetch_add:

1174 case AtomicExpr::AO__opencl_atomic_fetch_add:

1175 case AtomicExpr::AO__scoped_atomic_fetch_add:

1176 case AtomicExpr::AO__atomic_and_fetch:

1177 case AtomicExpr::AO__scoped_atomic_and_fetch:

1178 case AtomicExpr::AO__atomic_fetch_and:

1179 case AtomicExpr::AO__c11_atomic_fetch_and:

1180 case AtomicExpr::AO__hip_atomic_fetch_and:

1181 case AtomicExpr::AO__opencl_atomic_fetch_and:

1182 case AtomicExpr::AO__scoped_atomic_fetch_and:

1183 case AtomicExpr::AO__atomic_or_fetch:

1184 case AtomicExpr::AO__scoped_atomic_or_fetch:

1185 case AtomicExpr::AO__atomic_fetch_or:

1186 case AtomicExpr::AO__c11_atomic_fetch_or:

1187 case AtomicExpr::AO__hip_atomic_fetch_or:

1188 case AtomicExpr::AO__opencl_atomic_fetch_or:

1189 case AtomicExpr::AO__scoped_atomic_fetch_or:

1190 case AtomicExpr::AO__atomic_sub_fetch:

1191 case AtomicExpr::AO__scoped_atomic_sub_fetch:

1192 case AtomicExpr::AO__atomic_fetch_sub:

1193 case AtomicExpr::AO__c11_atomic_fetch_sub:

1194 case AtomicExpr::AO__hip_atomic_fetch_sub:

1195 case AtomicExpr::AO__opencl_atomic_fetch_sub:

1196 case AtomicExpr::AO__scoped_atomic_fetch_sub:

1197 case AtomicExpr::AO__atomic_xor_fetch:

1198 case AtomicExpr::AO__scoped_atomic_xor_fetch:

1199 case AtomicExpr::AO__atomic_fetch_xor:

1200 case AtomicExpr::AO__c11_atomic_fetch_xor:

1201 case AtomicExpr::AO__hip_atomic_fetch_xor:

1202 case AtomicExpr::AO__opencl_atomic_fetch_xor:

1203 case AtomicExpr::AO__scoped_atomic_fetch_xor:

1204 case AtomicExpr::AO__atomic_nand_fetch:

1205 case AtomicExpr::AO__atomic_fetch_nand:

1206 case AtomicExpr::AO__c11_atomic_fetch_nand:

1207 case AtomicExpr::AO__scoped_atomic_fetch_nand:

1208 case AtomicExpr::AO__scoped_atomic_nand_fetch:

1209 case AtomicExpr::AO__atomic_min_fetch:

1210 case AtomicExpr::AO__atomic_fetch_min:

1211 case AtomicExpr::AO__c11_atomic_fetch_min:

1212 case AtomicExpr::AO__hip_atomic_fetch_min:

1213 case AtomicExpr::AO__opencl_atomic_fetch_min:

1214 case AtomicExpr::AO__scoped_atomic_fetch_min:

1215 case AtomicExpr::AO__scoped_atomic_min_fetch:

1216 case AtomicExpr::AO__atomic_max_fetch:

1217 case AtomicExpr::AO__atomic_fetch_max:

1218 case AtomicExpr::AO__c11_atomic_fetch_max:

1219 case AtomicExpr::AO__hip_atomic_fetch_max:

1220 case AtomicExpr::AO__opencl_atomic_fetch_max:

1221 case AtomicExpr::AO__scoped_atomic_fetch_max:

1222 case AtomicExpr::AO__scoped_atomic_max_fetch:

1223 case AtomicExpr::AO__atomic_test_and_set:

1224 case AtomicExpr::AO__atomic_clear:

1225 llvm_unreachable("Integral atomic operations always become atomicrmw!");

1226 }

1227

1228 if (E->isOpenCL()) {

1229 LibCallName =

1230 std::string("__opencl") + StringRef(LibCallName).drop_front(1).str();

1231 }

1232

1233 if (!HaveRetTy) {

1234

1237 CastToGenericAddrSpace(Dest.emitRawPointer(*this), RetTy)),

1239 }

1240

1243 if (E->isOpenCL())

1245

1247

1248 if (E->isCmpXChg())

1249 return Res;

1250

1253

1256 }

1257

1258 bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||

1259 E->getOp() == AtomicExpr::AO__opencl_atomic_store ||

1260 E->getOp() == AtomicExpr::AO__hip_atomic_store ||

1261 E->getOp() == AtomicExpr::AO__atomic_store ||

1262 E->getOp() == AtomicExpr::AO__atomic_store_n ||

1263 E->getOp() == AtomicExpr::AO__scoped_atomic_store ||

1264 E->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||

1265 E->getOp() == AtomicExpr::AO__atomic_clear;

1266 bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||

1267 E->getOp() == AtomicExpr::AO__opencl_atomic_load ||

1268 E->getOp() == AtomicExpr::AO__hip_atomic_load ||

1269 E->getOp() == AtomicExpr::AO__atomic_load ||

1270 E->getOp() == AtomicExpr::AO__atomic_load_n ||

1271 E->getOp() == AtomicExpr::AO__scoped_atomic_load ||

1272 E->getOp() == AtomicExpr::AO__scoped_atomic_load_n;

1273

1274 if (isa<llvm::ConstantInt>(Order)) {

1275 auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();

1276

1277

1278 if (llvm::isValidAtomicOrderingCABI(ord))

1279 switch ((llvm::AtomicOrderingCABI)ord) {

1280 case llvm::AtomicOrderingCABI::relaxed:

1281 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,

1282 llvm::AtomicOrdering::Monotonic, Scope);

1283 break;

1284 case llvm::AtomicOrderingCABI::consume:

1285 case llvm::AtomicOrderingCABI::acquire:

1286 if (IsStore)

1287 break;

1288 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,

1289 llvm::AtomicOrdering::Acquire, Scope);

1290 break;

1291 case llvm::AtomicOrderingCABI::release:

1292 if (IsLoad)

1293 break;

1294 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,

1295 llvm::AtomicOrdering::Release, Scope);

1296 break;

1297 case llvm::AtomicOrderingCABI::acq_rel:

1298 if (IsLoad || IsStore)

1299 break;

1300 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,

1301 llvm::AtomicOrdering::AcquireRelease, Scope);

1302 break;

1303 case llvm::AtomicOrderingCABI::seq_cst:

1304 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,

1305 llvm::AtomicOrdering::SequentiallyConsistent, Scope);

1306 break;

1307 }

1310

1313 }

1314

1315

1316

1317

1318 llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,

1319 *ReleaseBB = nullptr, *AcqRelBB = nullptr,

1320 *SeqCstBB = nullptr;

1322 if (!IsStore)

1324 if (!IsLoad)

1326 if (!IsLoad && !IsStore)

1330

1331

1332

1333

1334

1335 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);

1336 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

1337

1338

1339 Builder.SetInsertPoint(MonotonicBB);

1340 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,

1341 llvm::AtomicOrdering::Monotonic, Scope);

1342 Builder.CreateBr(ContBB);

1343 if (!IsStore) {

1344 Builder.SetInsertPoint(AcquireBB);

1345 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,

1346 llvm::AtomicOrdering::Acquire, Scope);

1347 Builder.CreateBr(ContBB);

1348 SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),

1349 AcquireBB);

1350 SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),

1351 AcquireBB);

1352 }

1353 if (!IsLoad) {

1354 Builder.SetInsertPoint(ReleaseBB);

1355 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,

1356 llvm::AtomicOrdering::Release, Scope);

1357 Builder.CreateBr(ContBB);

1358 SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),

1359 ReleaseBB);

1360 }

1361 if (!IsLoad && !IsStore) {

1362 Builder.SetInsertPoint(AcqRelBB);

1363 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,

1364 llvm::AtomicOrdering::AcquireRelease, Scope);

1365 Builder.CreateBr(ContBB);

1366 SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),

1367 AcqRelBB);

1368 }

1369 Builder.SetInsertPoint(SeqCstBB);

1370 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,

1371 llvm::AtomicOrdering::SequentiallyConsistent, Scope);

1372 Builder.CreateBr(ContBB);

1373 SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),

1374 SeqCstBB);

1375

1376

1377 Builder.SetInsertPoint(ContBB);

1380

1381 assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());

1384}
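
Editorial aside: the constant- and runtime-order dispatch above also enforces which orderings are legal per operation; illegal combinations are simply not emitted. A sketch of that filter (our names; values match the C ABI constants):

enum OrderCABI { OrdRelaxed = 0, OrdConsume, OrdAcquire, OrdRelease,
                 OrdAcqRel, OrdSeqCst };

// Acquire flavors are dropped for pure stores, release flavors for pure
// loads, and acq_rel is reserved for read-modify-write operations.
static bool orderingEmitted(OrderCABI o, bool isLoad, bool isStore) {
  if (isStore && (o == OrdConsume || o == OrdAcquire)) return false;
  if (isLoad && o == OrdRelease) return false;
  if ((isLoad || isStore) && o == OrdAcqRel) return false;
  return true;
}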

1385

1386Address AtomicInfo::castToAtomicIntPointer(Address addr) const {

1387 llvm::IntegerType *ty =

1388 llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);

1390}

1391

1392Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {

1395 if (SourceSizeInBits != AtomicSizeInBits) {

1396 Address Tmp = CreateTempAlloca();

1398 std::min(AtomicSizeInBits, SourceSizeInBits) / 8);

1399 Addr = Tmp;

1400 }

1401

1402 return castToAtomicIntPointer(Addr);

1403}

1404

1405RValue AtomicInfo::convertAtomicTempToRValue(Address addr,

1408 bool asValue) const {

1411 return resultSlot.asRValue();

1412

1413

1414 if (hasPadding())

1416

1417

1418

1420 }

1421 if (!asValue)

1422

1436}

1437

1438

1439

1440

1441

1442

1444 if (ValTy->isFloatingPointTy())

1445 return ValTy->isX86_FP80Ty() || CmpXchg;

1446 return !ValTy->isIntegerTy() && !ValTy->isPointerTy();

1447}

1448

1449RValue AtomicInfo::ConvertToValueOrAtomic(llvm::Value *Val,

1452 bool CmpXchg) const {

1453

1454 assert((Val->getType()->isIntegerTy() || Val->getType()->isPointerTy() ||

1455 Val->getType()->isIEEELikeFPTy()) &&

1456 "Expected integer, pointer or floating point value when converting "

1457 "result.");

1458 if (getEvaluationKind() == TEK_Scalar &&

1461 !hasPadding()) ||

1462 !AsValue)) {

1463 auto *ValTy = AsValue

1465 : getAtomicAddress().getElementType();

1467 assert((!ValTy->isIntegerTy() || Val->getType() == ValTy) &&

1468 "Different integer types.");

1470 }

1471 if (llvm::CastInst::isBitCastable(Val->getType(), ValTy))

1473 }

1474

1475

1476

1478 bool TempIsVolatile = false;

1479 if (AsValue && getEvaluationKind() == TEK_Aggregate) {

1480 assert(!ResultSlot.isIgnored());

1482 TempIsVolatile = ResultSlot.isVolatile();

1483 } else {

1484 Temp = CreateTempAlloca();

1485 }

1486

1487

1488 Address CastTemp = castToAtomicIntPointer(Temp);

1489 CGF.Builder.CreateStore(Val, CastTemp)->setVolatile(TempIsVolatile);

1490

1491 return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);

1492}

1493

1494void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,

1495 llvm::AtomicOrdering AO, bool) {

1496

1501 Args.add(

1502 RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),

1505}

1506

1507llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,

1508 bool IsVolatile, bool CmpXchg) {

1509

1510 Address Addr = getAtomicAddress();

1512 Addr = castToAtomicIntPointer(Addr);

1514 Load->setAtomic(AO);

1515

1516

1517 if (IsVolatile)

1518 Load->setVolatile(true);

1520 return Load;

1521}

1522

1523

1524

1525

1528 AtomicInfo AI(*this, LV);

1530

1531 bool AtomicIsInline = !AI.shouldUseLibcall();

1532

1535 return false;

1536 return IsVolatile && AtomicIsInline;

1537}

1538

1541 llvm::AtomicOrdering AO;

1544 AO = llvm::AtomicOrdering::SequentiallyConsistent;

1545 } else {

1546 AO = llvm::AtomicOrdering::Acquire;

1547 IsVolatile = true;

1548 }

1549 return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);

1550}

1551

1553 bool AsValue, llvm::AtomicOrdering AO,

1554 bool IsVolatile) {

1555

1556 if (shouldUseLibcall()) {

1561 } else

1562 TempAddr = CreateTempAlloca();

1563

1564 EmitAtomicLoadLibcall(TempAddr.emitRawPointer(CGF), AO, IsVolatile);

1565

1566

1567

1568 return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);

1569 }

1570

1571

1572 auto *Load = EmitAtomicLoadOp(AO, IsVolatile);

1573

1574

1577

1578

1579

1580 return ConvertToValueOrAtomic(Load, ResultSlot, Loc, AsValue);

1581}

1582

1583

1584

1586 llvm::AtomicOrdering AO, bool IsVolatile,

1588 AtomicInfo Atomics(*this, src);

1589 return Atomics.EmitAtomicLoad(resultSlot, loc, true, AO,

1590 IsVolatile);

1591}

1592

1593

1594

1595void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {

1597

1598

1599

1603 getAtomicType());

1608 return;

1609 }

1610

1611

1612

1613

1614 emitMemSetZeroIfNecessary();

1615

1616

1617 LValue TempLVal = projectValue();

1618

1619

1622 } else {

1624 }

1625}

1626

1627

1628

1629

1630Address AtomicInfo::materializeRValue(RValue rvalue) const {

1631

1632

1635

1636

1638 AtomicInfo Atomics(CGF, TempLV);

1639 Atomics.emitCopyIntoMemory(rvalue);

1641}

1642

1643llvm::Value *AtomicInfo::getScalarRValValueOrNull(RValue RVal) const {

1646 return nullptr;

1647}

1648

1649llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal, bool CmpXchg) const {

1650

1651

1652 if (llvm::Value *Value = getScalarRValValueOrNull(RVal)) {

1655 else {

1656 llvm::IntegerType *InputIntTy = llvm::IntegerType::get(

1658 LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());

1659 if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))

1660 return CGF.Builder.CreateBitCast(Value, InputIntTy);

1661 }

1662 }

1663

1664

1665 Address Addr = materializeRValue(RVal);

1666

1667

1668 Addr = castToAtomicIntPointer(Addr);

1670}
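
Editorial aside: convertRValueToInt with CmpXchg set is why floating-point compare-exchange goes through an integer view; cmpxchg compares bit patterns, which the integer type makes explicit. The same idea in portable C++20 (not clang API):

#include <atomic>
#include <bit>
#include <cstdint>

// CAS on a float via its bit pattern: this can fail for -0.0 vs +0.0 or
// for NaNs with different payloads, even where operator== would agree.
static bool casFloatBits(std::atomic<uint32_t> &word, float expected,
                         float desired) {
  uint32_t e = std::bit_cast<uint32_t>(expected);
  return word.compare_exchange_strong(e, std::bit_cast<uint32_t>(desired));
}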

1671

1672std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(

1673 llvm::Value *ExpectedVal, llvm::Value *DesiredVal,

1674 llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {

1675

1676 Address Addr = getAtomicAddressAsAtomicIntPointer();

1679

1681 Inst->setWeak(IsWeak);

1682

1683

1684 auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, 0);

1685 auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, 1);

1686 return std::make_pair(PreviousVal, SuccessFailureVal);

1687}

1688

1689llvm::Value *

1690AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,

1691 llvm::Value *DesiredAddr,

1692 llvm::AtomicOrdering Success,

1693 llvm::AtomicOrdering Failure) {

1694

1695

1702 llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),

1705 llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),

1707 auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",

1709

1710 return SuccessFailureRVal.getScalarVal();

1711}

1712

1713std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(

1715 llvm::AtomicOrdering Failure, bool IsWeak) {

1716

1717 if (shouldUseLibcall()) {

1718

1720 llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);

1721 llvm::Value *DesiredPtr = materializeRValue(Desired).emitRawPointer(CGF);

1722 auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr,

1724 return std::make_pair(

1727 Res);

1728 }

1729

1730

1731

1732 auto *ExpectedVal = convertRValueToInt(Expected, true);

1733 auto *DesiredVal = convertRValueToInt(Desired, true);

1734 auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,

1735 Failure, IsWeak);

1736 return std::make_pair(

1739 true),

1741}

1742

1743static void

1745 const llvm::function_ref<RValue(RValue)> &UpdateOp,

1748 LValue AtomicLVal = Atomics.getAtomicLValue();

1750 if (AtomicLVal.isSimple()) {

1751 UpRVal = OldRVal;

1753 } else {

1754

1755 Address Ptr = Atomics.materializeRValue(OldRVal);

1758 UpdateLVal =

1763 DesiredLVal =

1775 } else {

1784 }

1786 }

1787

1788 RValue NewRVal = UpdateOp(UpRVal);

1791 } else {

1794 false);

1795 }

1796}

1797

1798void AtomicInfo::EmitAtomicUpdateLibcall(

1799 llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,

1800 bool IsVolatile) {

1801 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

1802

1803 Address ExpectedAddr = CreateTempAlloca();

1804

1805 EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);

1809 Address DesiredAddr = CreateTempAlloca();

1810 if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||

1811 requiresMemSetZero(getAtomicAddress().getElementType())) {

1814 }

1815 auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,

1819 llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);

1820 llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);

1821 auto *Res =

1822 EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);

1823 CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);

1824 CGF.EmitBlock(ExitBB, true);

1825}

1826

1827void AtomicInfo::EmitAtomicUpdateOp(

1828 llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,

1829 bool IsVolatile) {

1830 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

1831

1832

1833 auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, true);

1834

1837 auto *CurBB = CGF.Builder.GetInsertBlock();

1839 llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),

1840 2);

1841 PHI->addIncoming(OldVal, CurBB);

1842 Address NewAtomicAddr = CreateTempAlloca();

1843 Address NewAtomicIntAddr =

1845 ? castToAtomicIntPointer(NewAtomicAddr)

1846 : NewAtomicAddr;

1847

1848 if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||

1849 requiresMemSetZero(getAtomicAddress().getElementType())) {

1851 }

1854 true);

1857

1858 auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);

1859 PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());

1860 CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);

1861 CGF.EmitBlock(ExitBB, true);

1862}
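
Editorial aside: EmitAtomicUpdateOp emits the classic compare-exchange retry loop; the PHI node carries the freshly observed value back around the loop, exactly as compare_exchange_weak refreshes its expected argument in this C++ equivalent:

#include <atomic>

template <typename T, typename F>
T atomicUpdate(std::atomic<T> &obj, F updateOp) {
  T old = obj.load(std::memory_order_relaxed);
  while (!obj.compare_exchange_weak(old, updateOp(old)))
    ; // on failure, `old` now holds the current value; retry
  return old;
}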

1863

1866 LValue AtomicLVal = Atomics.getAtomicLValue();

1868

1870 DesiredLVal =

1875 DesiredLVal =

1879 } else {

1884 }

1885

1886 assert(UpdateRVal.isScalar());

1888}

1889

1890void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,

1891 RValue UpdateRVal, bool IsVolatile) {

1892 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

1893

1894 Address ExpectedAddr = CreateTempAlloca();

1895

1896 EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);

1900 Address DesiredAddr = CreateTempAlloca();

1901 if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||

1902 requiresMemSetZero(getAtomicAddress().getElementType())) {

1905 }

1907 llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);

1908 llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);

1909 auto *Res =

1910 EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);

1911 CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);

1912 CGF.EmitBlock(ExitBB, true);

1913}

1914

1915void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,

1916 bool IsVolatile) {

1917 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

1918

1919

1920 auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, true);

1921

1924 auto *CurBB = CGF.Builder.GetInsertBlock();

1926 llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),

1927 2);

1928 PHI->addIncoming(OldVal, CurBB);

1929 Address NewAtomicAddr = CreateTempAlloca();

1930 Address NewAtomicIntAddr = castToAtomicIntPointer(NewAtomicAddr);

1931 if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||

1932 requiresMemSetZero(getAtomicAddress().getElementType())) {

1934 }

1937

1938 auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);

1939 PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());

1940 CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);

1941 CGF.EmitBlock(ExitBB, true);

1942}

1943

1944void AtomicInfo::EmitAtomicUpdate(

1945 llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,

1946 bool IsVolatile) {

1947 if (shouldUseLibcall()) {

1948 EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);

1949 } else {

1950 EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);

1951 }

1952}

1953

1954void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,

1955 bool IsVolatile) {

1956 if (shouldUseLibcall()) {

1957 EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);

1958 } else {

1959 EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);

1960 }

1961}

1962

1964 bool isInit) {

1966 llvm::AtomicOrdering AO;

1968 AO = llvm::AtomicOrdering::SequentiallyConsistent;

1969 } else {

1970 AO = llvm::AtomicOrdering::Release;

1971 IsVolatile = true;

1972 }

1973 return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);

1974}

1975

1976

1977

1978

1979

1980

1982 llvm::AtomicOrdering AO, bool IsVolatile,

1983 bool isInit) {

1984

1985

1989

1990 AtomicInfo atomics(*this, dest);

1991 LValue LVal = atomics.getAtomicLValue();

1992

1993

1995 if (isInit) {

1996 atomics.emitCopyIntoMemory(rvalue);

1997 return;

1998 }

1999

2000

2001 if (atomics.shouldUseLibcall()) {

2002

2003 Address srcAddr = atomics.materializeRValue(rvalue);

2004

2005

2012 args.add(

2013 RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),

2016 return;

2017 }

2018

2019

2020 llvm::Value *ValToStore = atomics.convertRValueToInt(rvalue);

2021

2022

2023 Address Addr = atomics.getAtomicAddress();

2024 if (llvm::Value *Value = atomics.getScalarRValValueOrNull(rvalue))

2026 Addr = atomics.castToAtomicIntPointer(Addr);

2028 false);

2029 }

2031

2032 if (AO == llvm::AtomicOrdering::Acquire)

2033 AO = llvm::AtomicOrdering::Monotonic;

2034 else if (AO == llvm::AtomicOrdering::AcquireRelease)

2035 AO = llvm::AtomicOrdering::Release;

2036

2037 if (!isInit)

2038 store->setAtomic(AO);

2039

2040

2041 if (IsVolatile)

2042 store->setVolatile(true);

2044 return;

2045 }

2046

2047

2048 atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);

2049}

2050

2051

2052

2055 llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,

2057

2058

2059 assert(Expected.isAggregate() ||

2060 Expected.getAggregateAddress().getElementType() ==

2065 AtomicInfo Atomics(*this, Obj);

2066

2067 return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,

2068 IsWeak);

2069}

2070

2071llvm::AtomicRMWInst *

2073 llvm::Value *Val, llvm::AtomicOrdering Order,

2074 llvm::SyncScope::ID SSID,

2076 llvm::AtomicRMWInst *RMW =

2079 return RMW;

2080}

2081

2083 LValue LVal, llvm::AtomicOrdering AO,

2084 const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {

2085 AtomicInfo Atomics(*this, LVal);

2086 Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);

2087}

2088

2090 AtomicInfo atomics(*this, dest);

2091

2092 switch (atomics.getEvaluationKind()) {

2095 atomics.emitCopyIntoMemory(RValue::get(value));

2096 return;

2097 }

2098

2102 return;

2103 }

2104

2106

2107

2108 bool Zeroed = false;

2110 Zeroed = atomics.emitMemSetZeroIfNecessary();

2111 dest = atomics.projectValue();

2112 }

2113

2114

2120

2122 return;

2123 }

2124 }

2125 llvm_unreachable("bad evaluation kind");

2126}

Defines the clang::ASTContext interface.

static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type, uint64_t expectedSize)

Does a store of the given IR type modify the full expected width?

static llvm::Value * EmitPostAtomicMinMax(CGBuilderTy &Builder, AtomicExpr::AtomicOp Op, bool IsSigned, llvm::Value *OldVal, llvm::Value *RHS)

Duplicate the atomic min/max operation in conventional IR for the builtin variants that return the new rather than the original value.

static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal, const llvm::function_ref< RValue(RValue)> &UpdateOp, Address DesiredAddr)

static Address EmitValToTemp(CodeGenFunction &CGF, Expr *E)

static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest, Address Ptr, Address Val1, Address Val2, llvm::Value *IsWeak, llvm::Value *FailureOrder, uint64_t Size, llvm::AtomicOrdering Order, llvm::SyncScope::ID Scope)

static RValue emitAtomicLibcall(CodeGenFunction &CGF, StringRef fnName, QualType resultType, CallArgList &args)
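
For reference, the generic size-parameterized entry points this helper targets have the following C ABI (per the GNU libatomic specification; these declarations are ours, not part of this file):

#include <cstddef>

extern "C" {
void __atomic_load(size_t size, void *ptr, void *ret, int memorder);
void __atomic_store(size_t size, void *ptr, void *val, int memorder);
void __atomic_exchange(size_t size, void *ptr, void *val, void *ret,
                       int memorder);
bool __atomic_compare_exchange(size_t size, void *ptr, void *expected,
                               void *desired, int success, int failure);
}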

static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr, Address Val1, Address Val2, llvm::Value *FailureOrderVal, uint64_t Size, llvm::AtomicOrdering SuccessOrder, llvm::SyncScope::ID Scope)

Given an ordering required on success, emit all possible cmpxchg instructions to cope with the provided (but possibly only dynamically known) FailureOrder.

static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr, Address Val1, Address Val2, uint64_t Size, llvm::AtomicOrdering SuccessOrder, llvm::AtomicOrdering FailureOrder, llvm::SyncScope::ID Scope)

static bool shouldCastToInt(llvm::Type *ValTy, bool CmpXchg)

Return true if ValTy is a type that should be casted to integer around the atomic memory operation.

CodeGenFunction::ComplexPairTy ComplexPairTy

static QualType getPointeeType(const MemRegion *R)

Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic analysis of a program.

CanQualType getSizeType() const

Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.

TypeInfoChars getTypeInfoInChars(const Type *T) const

int64_t toBits(CharUnits CharSize) const

Convert a size in characters to a size in bits.

CharUnits getTypeSizeInChars(QualType T) const

Return the size of the specified (complete) type T, in characters.

QualType getExtVectorType(QualType VectorType, unsigned NumElts) const

Return the unique reference to an extended vector type of the specified element type and size.

CharUnits toCharUnitsFromBits(int64_t BitSize) const

Convert a size in bits to a size in characters.

unsigned getTargetAddressSpace(LangAS AS) const

AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load, __atomic_store, and __atomic_compare_exchange_*, for the similarly-named C++11 instructions, and __c11 variants for <stdatomic.h>, and corresponding __opencl_atomic_* for OpenCL 2.0.
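
A minimal C++ illustration of the operation families this file lowers (std::atomic shown; the __atomic_*, __c11_*, __scoped_atomic_*, __opencl_atomic_* and __hip_atomic_* builtin forms reduce to the same underlying operations):

#include <atomic>

int demo(std::atomic<int> &a) {
  a.store(1, std::memory_order_release);
  int v = a.load(std::memory_order_acquire);
  v += a.fetch_add(2, std::memory_order_seq_cst);
  int expected = 3;
  a.compare_exchange_strong(expected, 4, std::memory_order_seq_cst,
                            std::memory_order_relaxed);
  return v;
}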

CharUnits - This is an opaque type for sizes expressed in character units.

bool isZero() const

isZero - Test whether the quantity equals zero.

llvm::Align getAsAlign() const

getAsAlign - Returns Quantity as a valid llvm::Align. Beware: llvm::Align assumes power of two 8-bit bytes.

QuantityType getQuantity() const

getQuantity - Get the raw integer representation of this quantity.

Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this class is possibly signed.

llvm::Value * emitRawPointer(CodeGenFunction &CGF) const

Return the pointer contained in this class after authenticating it and adding offset to it if necessary.

CharUnits getAlignment() const

llvm::Type * getElementType() const

Return the type of the values stored in this address.

Address withElementType(llvm::Type *ElemTy) const

Return address with different element type, but same pointer and alignment.

static AggValueSlot ignored()

ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.

Address getAddress() const

static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)

llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)

Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")

llvm::AtomicRMWInst * CreateAtomicRMW(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Ordering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)

llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)

Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")

llvm::AtomicCmpXchgInst * CreateAtomicCmpXchg(Address Addr, llvm::Value *Cmp, llvm::Value *New, llvm::AtomicOrdering SuccessOrdering, llvm::AtomicOrdering FailureOrdering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)

llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")

llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)

Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")

static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())

CGFunctionInfo - Class to encapsulate the information about a function definition.

CallArgList - Type for representing both the value and type of arguments in a call.

void add(RValue rvalue, QualType type)
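
An illustrative (not verbatim) sketch of assembling atomic-libcall arguments this way, assuming CGF, SizeInChars, and PtrVal are in scope:

  CallArgList Args;
  Args.add(RValue::get(CGF.CGM.getSize(SizeInChars)),
           CGF.getContext().getSizeType());
  Args.add(RValue::get(PtrVal), CGF.getContext().VoidPtrTy);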

CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...

llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)

EmitFromMemory - Change a scalar value from its memory representation to its value representation.

std::pair< RValue, llvm::Value * > EmitAtomicCompareExchange(LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success=llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering Failure=llvm::AtomicOrdering::SequentiallyConsistent, bool IsWeak=false, AggValueSlot Slot=AggValueSlot::ignored())
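
Hedged usage of the listed overload, with Obj, Expected, Desired, and Loc assumed to have been built by the caller; the defaults give strong, seq_cst semantics:

  auto Res = CGF.EmitAtomicCompareExchange(Obj, Expected, Desired, Loc);
  RValue Old = Res.first;       // the previous value
  llvm::Value *Ok = Res.second; // i1 success flag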

static TypeEvaluationKind getEvaluationKind(QualType T)

getEvaluationKind - Return the TypeEvaluationKind of QualType T.

void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)

EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guaranteed to have the same type.

RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
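
A minimal sketch, assuming LV is an atomic LValue and Loc its source location; the ignored slot suffices for scalar results, while aggregate results need a real slot:

  RValue RV = CGF.EmitAtomicLoad(LV, Loc);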

bool hasVolatileMember(QualType T)

hasVolatileMember - returns true if aggregate type has a volatile member.

llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)

createBasicBlock - Create an LLVM basic block.

void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO, const llvm::function_ref< RValue(RValue)> &UpdateOp, bool IsVolatile)
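
A hedged sketch of a read-modify-write through this entry point; the callback maps the old value to the new one, and NewVal is a placeholder for whatever the update computes:

  CGF.EmitAtomicUpdate(
      LVal, llvm::AtomicOrdering::SequentiallyConsistent,
      [&](RValue Old) -> RValue { return RValue::get(NewVal); },
      /*IsVolatile=*/false);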

const LangOptions & getLangOpts() const

void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)

EmitBlock - Emit the given block.
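
These two primitives carry the control flow for the strong compare-exchange retry loop; a condensed, illustrative shape (Success is an assumed i1 value):

  llvm::BasicBlock *FailBB = CGF.createBasicBlock("atomic_fail");
  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic_cont");
  CGF.Builder.CreateCondBr(Success, ContBB, FailBB);
  CGF.EmitBlock(FailBB);
  // ... emit the failure path here ...
  CGF.Builder.CreateBr(ContBB);
  CGF.EmitBlock(ContBB);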

ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)

EmitComplexExpr - Emit the computation of the specified expression of complex type, returning the result.

RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)

EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address of the lvalue, then loads the result as an rvalue, returning the rvalue.

RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)

void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)

EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory location.

llvm::Type * ConvertTypeForMem(QualType T)

RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)

CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment, and cast it to the default address space.

void EmitAtomicInit(Expr *E, LValue lvalue)

const TargetInfo & getTarget() const

llvm::Value * getTypeSize(QualType Ty)

Returns calculated size of the specified type.

Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)

EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our best estimate of the alignment of the pointee.

RValue EmitLoadOfExtVectorElementLValue(LValue V)

void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)

EmitAggregateCopy - Emit an aggregate copy.

const TargetCodeGenInfo & getTargetHooks() const

void EmitAggExpr(const Expr *E, AggValueSlot AS)

EmitAggExpr - Emit the computation of the specified expression of aggregate type.

llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)

EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
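
Together with EmitFromMemory above, this handles types whose in-memory and SSA forms differ; a small sketch for bool, which is typically i8 in memory but i1 as a value:

  llvm::Value *Mem = CGF.EmitToMemory(V, CGF.getContext().BoolTy);     // i1 -> i8
  llvm::Value *Val = CGF.EmitFromMemory(Mem, CGF.getContext().BoolTy); // i8 -> i1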

RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)

EmitCall - Generate a call of the given function, expecting the given result type, and using the given argument list which specifies both the LLVM arguments and the types they were derived from.

ASTContext & getContext() const

LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)

void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)

EmitStoreOfComplex - Store a complex number into the specified l-value.

void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)

llvm::AtomicRMWInst * emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Order=llvm::AtomicOrdering::SequentiallyConsistent, llvm::SyncScope::ID SSID=llvm::SyncScope::System, const AtomicExpr *AE=nullptr)

Emit an atomicrmw instruction, applying relevant metadata when applicable.
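
A short example of a fetch-add routed through this helper so target metadata is attached (Addr and Amount assumed in scope):

  llvm::AtomicRMWInst *RMW = CGF.emitAtomicRMWInst(
      llvm::AtomicRMWInst::Add, Addr, Amount,
      llvm::AtomicOrdering::SequentiallyConsistent);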

llvm::LLVMContext & getLLVMContext()

llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)

EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type, returning the result.

bool LValueIsSuitableForInlineAtomic(LValue Src)

void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)

EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the memory representation to the LLVM value representation.

RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc)

This class organizes the cross-function state that is used while generating LLVM code.

llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)

Create or return a runtime function declaration with the specified type and name.

DiagnosticsEngine & getDiags() const

const LangOptions & getLangOpts() const

CodeGenTypes & getTypes()

const llvm::DataLayout & getDataLayout() const

void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)

DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.

llvm::LLVMContext & getLLVMContext()

llvm::ConstantInt * getSize(CharUnits numChars)

Emit the given number of characters as a value of type size_t.

llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)

GetFunctionType - Get the LLVM function type for the given function info.

const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)

LValue - This represents an lvalue reference.

llvm::Value * getRawExtVectorPointer(CodeGenFunction &CGF) const

llvm::Constant * getExtVectorElts() const

void setAlignment(CharUnits A)

bool isVolatileQualified() const

llvm::Value * getRawBitFieldPointer(CodeGenFunction &CGF) const

CharUnits getAlignment() const

static LValue MakeExtVectorElt(Address Addr, llvm::Constant *Elts, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)

Address getAddress() const

llvm::Value * getRawVectorPointer(CodeGenFunction &CGF) const

bool isExtVectorElt() const

llvm::Value * getVectorIdx() const

LValueBaseInfo getBaseInfo() const

const CGBitFieldInfo & getBitFieldInfo() const

TBAAAccessInfo getTBAAInfo() const

Address getVectorAddress() const

static LValue MakeBitfield(Address Addr, const CGBitFieldInfo &Info, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)

Create a new object to represent a bit-field access.

llvm::Value * emitRawPointer(CodeGenFunction &CGF) const

static LValue MakeVectorElt(Address vecAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)

Address getExtVectorAddress() const

Address getBitFieldAddress() const

RValue - This trivial value class is used to represent the result of an expression that is evaluated.

static RValue get(llvm::Value *V)

static RValue getAggregate(Address addr, bool isVolatile=false)

Convert an Address to an RValue.

static RValue getComplex(llvm::Value *V1, llvm::Value *V2)

Address getAggregateAddress() const

getAggregateAddress() - Return the address of the aggregate.

llvm::Value * getScalarVal() const

getScalarVal() - Return the Value* of this scalar value.

bool isVolatileQualified() const

std::pair< llvm::Value *, llvm::Value * > getComplexVal() const

getComplexVal - Return the real/imag components of this complex value.
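
For orientation, the three RValue shapes side by side (V, Re, Im, and Tmp are assumed values/addresses):

  RValue S  = RValue::get(V);              // scalar
  RValue Cx = RValue::getComplex(Re, Im);  // complex real/imag pair
  RValue Ag = RValue::getAggregate(Tmp);   // address of aggregate storage
  auto [RePart, ImPart] = Cx.getComplexVal();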

ReturnValueSlot - Contains the address where the return value of a function can be stored, and whether the address is volatile or is unused.

Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const

virtual llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts, SyncScope Scope, llvm::AtomicOrdering Ordering, llvm::LLVMContext &Ctx) const

Get the syncscope used in LLVM IR.

virtual void setTargetAtomicMetadata(CodeGenFunction &CGF, llvm::Instruction &AtomicInst, const AtomicExpr *Expr=nullptr) const

Allow the target to apply other metadata to an atomic instruction.

Concrete class used by the front-end to report problems and issues.

DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)

Issue the message to the client.

This represents one expression.

SourceLocation getExprLoc() const LLVM_READONLY

getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic expression.

PointerType - C99 6.7.5.1 - Pointer Declarators.

A (possibly-)qualified type.

bool isNull() const

Return true if this QualType doesn't point to a type yet.

LangAS getAddressSpace() const

Return the address space of this type.

Qualifiers getQualifiers() const

Retrieve the set of qualifiers applied to this type.

QualType getUnqualifiedType() const

Retrieve the unqualified variant of the given type, removing as little sugar as possible.

Scope - A scope is a transient data structure that is used while parsing the program.

Encodes a location in the source.

SourceLocation getBeginLoc() const LLVM_READONLY

unsigned getMaxAtomicInlineWidth() const

Return the maximum width lock-free atomic operation which can be inlined given the supported features of the given target.
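
A condensed, hedged version of the decision this file bases on it: operations wider than this limit, or insufficiently aligned ones, fall back to the __atomic_* libcalls (Target, SizeInBits, and AlignInBits assumed in scope; the real check also consults hasBuiltinAtomic):

  bool CanInline = SizeInBits <= Target.getMaxAtomicInlineWidth() &&
                   AlignInBits >= SizeInBits;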

bool isPointerType() const

const T * castAs() const

Member-template castAs<specific type>.

QualType getPointeeType() const

If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.

bool isAtomicType() const

bool isFloatingType() const

const T * getAs() const

Member-template getAs<specific type>.

Represents a GCC generic vector type.

TypeEvaluationKind

The kind of evaluation to perform on values of a particular type.

const internal::VariadicAllOfMatcher< Type > type

Matches Types in the clang AST.

bool Load(InterpState &S, CodePtr OpPC)

The JSON file list parser is used to communicate input to InstallAPI.

llvm::StringRef getAsString(SyncScope S)

@ Success

Template argument deduction was successful.

Structure with information about how a bitfield should be accessed.

CharUnits StorageOffset

The offset of the bitfield storage from the start of the struct.

unsigned Offset

The offset within a contiguous run of bitfields that are represented as a single "field" within the LLVM struct type. This offset is in bits.

unsigned Size

The total size of the bit-field, in bits.

unsigned StorageSize

The storage size in bits which should be used when accessing this bitfield.

llvm::PointerType * VoidPtrTy

llvm::IntegerType * Int8Ty

i8, i16, i32, and i64

llvm::IntegerType * SizeTy

llvm::IntegerType * IntTy

int

llvm::PointerType * UnqualPtrTy