clang: lib/CodeGen/Targets/AMDGPU.cpp Source File

//===- AMDGPU.cpp ---------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
#include "llvm/Support/AMDGPUAddrSpace.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// AMDGPU ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUABIInfo final : public DefaultABIInfo {
private:
  static const unsigned MaxNumRegsForArgsRet = 16;

  unsigned numRegsForType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Base,
                                         uint64_t Members) const override;

  // Coerce HIP scalar pointer arguments from generic pointers to global ones.
  llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
                                       unsigned ToAS) const {
    // Single value types.
    auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(Ty);
    if (PtrTy && PtrTy->getAddressSpace() == FromAS)
      return llvm::PointerType::get(Ty->getContext(), ToAS);
    return Ty;
  }

public:
  explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
  ABIArgInfo classifyArgumentType(QualType Ty, bool Variadic,
                                  unsigned &NumRegsLeft) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;
};

bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return true;
}

bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
  uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;

  return Members * NumRegs <= MaxNumRegsForArgsRet;
}
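// Illustration (not from the original source): a homogeneous aggregate of
// eight floats uses 8 * 1 = 8 registers and is "small enough"; eight doubles
// use 8 * 2 = 16, which still fits the 16-register budget, while anything
// larger is not treated as a small-enough homogeneous aggregate.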

/// Estimate number of registers the type will use when passed in registers.
unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
  unsigned NumRegs = 0;

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Compute from the number of elements. The reported size is based on the
    // in-memory size, which includes the padding 4th element for 3-vectors.
    QualType EltTy = VT->getElementType();
    unsigned EltSize = getContext().getTypeSize(EltTy);

    // 16-bit element vectors should be passed packed.
    if (EltSize == 16)
      return (VT->getNumElements() + 1) / 2;

    unsigned EltNumRegs = (EltSize + 31) / 32;
    return EltNumRegs * VT->getNumElements();
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember());

    for (const FieldDecl *Field : RD->fields()) {
      QualType FieldTy = Field->getType();
      NumRegs += numRegsForType(FieldTy);
    }

    return NumRegs;
  }

  return (getContext().getTypeSize(Ty) + 31) / 32;
}
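// Worked examples (illustrative, derived from the code above): a float4
// vector has 32-bit elements, so it needs 4 registers; a half4 vector packs
// two 16-bit elements per register, so it needs 2; a double needs
// (64 + 31) / 32 = 2; a struct { int; float; } sums its fields to 2.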

void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
  llvm::CallingConv::ID CC = FI.getCallingConvention();

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  unsigned ArgumentIndex = 0;
  const unsigned numFixedArguments = FI.getNumRequiredArgs();

  unsigned NumRegsLeft = MaxNumRegsForArgsRet;
  for (auto &Arg : FI.arguments()) {
    if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
      Arg.info = classifyKernelArgumentType(Arg.type);
    } else {
      bool FixedArgument = ArgumentIndex++ < numFixedArguments;
      Arg.info = classifyArgumentType(Arg.type, !FixedArgument, NumRegsLeft);
    }
  }
}

RValue AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty, AggValueSlot Slot) const {
  const bool IsIndirect = false;
  const bool AllowHigherAlign = false;
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4), AllowHigherAlign, Slot);
}
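// Note (illustrative): variadic arguments are read from the va_list with the
// common void*-based lowering, using a fixed 4-byte slot size and alignment
// (CharUnits::fromQuantity(4)) and without re-aligning to higher alignments.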

ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
  if (isAggregateTypeForABI(RetTy)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // returned by value.
    if (!getRecordArgABI(RetTy, getCXXABI())) {
      // Ignore empty structs/unions.
      if (isEmptyRecord(getContext(), RetTy, true))
        return ABIArgInfo::getIgnore();

      // Lower single-element structs to just return a regular value.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      if (const RecordType *RT = RetTy->getAs<RecordType>()) {
        const RecordDecl *RD = RT->getDecl();
        if (RD->hasFlexibleArrayMember())
          return DefaultABIInfo::classifyReturnType(RetTy);
      }

      // Pack small aggregates into registers.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));

      if (Size <= 32)
        return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

      if (Size <= 64) {
        llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
        return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
      }

      if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
        return ABIArgInfo::getDirect();
    }
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyReturnType(RetTy);
}
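// Illustrative results (not part of the original source): a struct of two
// ints (64 bits) is returned directly as [2 x i32]; a 4-byte struct comes
// back as a single i32; an aggregate needing more than 16 registers falls
// through to DefaultABIInfo and is returned indirectly.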

/// For kernels all parameters are really passed in a special buffer, so it
/// doesn't make sense to pass anything byval; everything must be direct or
/// indirect-aliased.
ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
    Ty = QualType(SeltTy, 0);

  llvm::Type *OrigLTy = CGT.ConvertType(Ty);
  llvm::Type *LTy = OrigLTy;
  if (getContext().getLangOpts().HIP) {
    LTy = coerceKernelArgumentType(
        OrigLTy, getContext().getTargetAddressSpace(LangAS::Default),
        getContext().getTargetAddressSpace(LangAS::cuda_device));
  }

  // Outside OpenCL, aggregate kernel arguments that were not coerced are
  // passed by reference in the constant address space.
  if (!getContext().getLangOpts().OpenCL && LTy == OrigLTy &&
      isAggregateTypeForABI(Ty)) {
    return ABIArgInfo::getIndirectAliased(
        getContext().getTypeAlignInChars(Ty),
        getContext().getTargetAddressSpace(LangAS::opencl_constant),
        false /*Realign*/, nullptr /*Padding*/);
  }

  // Pass everything else directly; CanBeFlattened is set to false so CodeGen
  // does not expand the struct into its individual elements.
  return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
}
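// Rough example of the HIP coercion above (illustrative; exact IR depends on
// the target and clang version): a kernel parameter declared as "float *p"
// is converted from a generic pointer to a global one, so
//   __global__ void k(float *p);
// lowers to roughly
//   define amdgpu_kernel void @k(ptr addrspace(1) %p)
// while (outside OpenCL) aggregate parameters are passed indirectly in the
// constant address space.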

ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty, bool Variadic,
                                               unsigned &NumRegsLeft) const {
  assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");

  if (Variadic) {
    return ABIArgInfo::getDirect(nullptr,
                                 0,
                                 nullptr,
                                 false,
                                 0);
  }

  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    // Lower single-element structs to just pass a regular value.
    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      const RecordDecl *RD = RT->getDecl();
      if (RD->hasFlexibleArrayMember())
        return DefaultABIInfo::classifyArgumentType(Ty);
    }

    // Pack small aggregates into registers.
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 64) {
      unsigned NumRegs = (Size + 31) / 32;
      NumRegsLeft -= std::min(NumRegsLeft, NumRegs);

      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));

      if (Size <= 32)
        return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

      llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
    }

    if (NumRegsLeft > 0) {
      unsigned NumRegs = numRegsForType(Ty);
      if (NumRegsLeft >= NumRegs) {
        NumRegsLeft -= NumRegs;
        return ABIArgInfo::getDirect();
      }
    }

    // Use pass-by-reference instead of pass-by-value for struct arguments in
    // the function ABI.
    return ABIArgInfo::getIndirectAliased(
        getContext().getTypeAlignInChars(Ty),
        getContext().getTargetAddressSpace(LangAS::opencl_private));
  }

  // Otherwise just do the default thing, while keeping the register estimate
  // up to date.
  ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty);
  if (!ArgInfo.isIndirect()) {
    unsigned NumRegs = numRegsForType(Ty);
    NumRegsLeft -= std::min(NumRegs, NumRegsLeft);
  }

  return ArgInfo;
}
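// Illustrative budgeting example: with the 16-register budget shared by all
// arguments of a device function, a struct of eight ints consumes 8 registers
// and is still passed directly; once the running NumRegsLeft can no longer
// cover an aggregate, it is passed by reference in the private address space
// instead.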

class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<AMDGPUABIInfo>(CGT)) {}

  void setFunctionDeclAttributes(const FunctionDecl *FD, llvm::Function *F,
                                 CodeGenModule &M) const;

  void emitTargetGlobals(CodeGen::CodeGenModule &CGM) const override;

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  unsigned getOpenCLKernelCallingConv() const override;

  llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
                                 llvm::PointerType *T, QualType QT) const override;

  LangAS getASTAllocaAddressSpace() const override {
    return getLangASFromTargetAS(
        getABIInfo().getDataLayout().getAllocaAddrSpace());
  }
  LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
                                  const VarDecl *D) const override;
  llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
                                         SyncScope Scope,
                                         llvm::AtomicOrdering Ordering,
                                         llvm::LLVMContext &Ctx) const override;
  void setTargetAtomicMetadata(CodeGenFunction &CGF,
                               llvm::Instruction &AtomicInst,
                               const AtomicExpr *Expr = nullptr) const override;
  llvm::Value *createEnqueuedBlockKernel(CodeGenFunction &CGF,
                                         llvm::Function *BlockInvokeFunc,
                                         llvm::Type *BlockTy) const override;
  bool shouldEmitStaticExternCAliases() const override;
  bool shouldEmitDWARFBitFieldSeparators() const override;
  void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
};
}

static bool requiresAMDGPUProtectedVisibility(const Decl *D,
                                              llvm::GlobalValue *GV) {
  if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
    return false;

  return !D->hasAttr<OMPDeclareTargetDeclAttr>() &&
         (D->hasAttr<OpenCLKernelAttr>() ||
          (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) ||
          (isa<VarDecl>(D) &&
           (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
            cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinSurfaceType() ||
            cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType())));
}

void AMDGPUTargetCodeGenInfo::setFunctionDeclAttributes(
    const FunctionDecl *FD, llvm::Function *F, CodeGenModule &M) const {
  const auto *ReqdWGS =
      M.getLangOpts().OpenCL ? FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
  const bool IsOpenCLKernel =
      M.getLangOpts().OpenCL && FD->hasAttr<OpenCLKernelAttr>();
  const bool IsHIPKernel = M.getLangOpts().HIP && FD->hasAttr<CUDAGlobalAttr>();

  const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
  if (ReqdWGS || FlatWGS) {
    M.handleAMDGPUFlatWorkGroupSizeAttr(F, FlatWGS, ReqdWGS);
  } else if (IsOpenCLKernel || IsHIPKernel) {
    // By default, restrict the maximum size to a value specified by
    // --gpu-max-threads-per-block=n or its default value for HIP.
    const unsigned OpenCLDefaultMaxWorkGroupSize = 256;
    const unsigned DefaultMaxWorkGroupSize =
        IsOpenCLKernel ? OpenCLDefaultMaxWorkGroupSize
                       : M.getLangOpts().GPUMaxThreadsPerBlock;
    std::string AttrVal =
        std::string("1,") + llvm::utostr(DefaultMaxWorkGroupSize);
    F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
  }

  if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>())
    M.handleAMDGPUWavesPerEUAttr(F, Attr);

  if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
    unsigned NumSGPR = Attr->getNumSGPR();

    if (NumSGPR != 0)
      F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
    uint32_t NumVGPR = Attr->getNumVGPR();

    if (NumVGPR != 0)
      F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUMaxNumWorkGroupsAttr>()) {
    uint32_t X = Attr->getMaxNumWorkGroupsX()
                     ->EvaluateKnownConstInt(M.getContext())
                     .getExtValue();
    // Y and Z dimensions default to 1 if not specified.
    uint32_t Y = Attr->getMaxNumWorkGroupsY()
                     ? Attr->getMaxNumWorkGroupsY()
                           ->EvaluateKnownConstInt(M.getContext())
                           .getExtValue()
                     : 1;
    uint32_t Z = Attr->getMaxNumWorkGroupsZ()
                     ? Attr->getMaxNumWorkGroupsZ()
                           ->EvaluateKnownConstInt(M.getContext())
                           .getExtValue()
                     : 1;

    llvm::SmallString<32> AttrVal;
    llvm::raw_svector_ostream OS(AttrVal);
    OS << X << ',' << Y << ',' << Z;

    F->addFnAttr("amdgpu-max-num-workgroups", AttrVal.str());
  }
}
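// Example of the source-to-IR attribute mapping above (illustrative):
//   __attribute__((amdgpu_flat_work_group_size(64, 256))) produces
//   "amdgpu-flat-work-group-size"="64,256";
//   __attribute__((amdgpu_waves_per_eu(2, 4))) produces
//   "amdgpu-waves-per-eu"="2,4";
// an OpenCL kernel with no such attributes gets the default "1,256".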

// Emits control constants used to change per-architecture behaviour in the
// AMDGPU ROCm device libraries.
void AMDGPUTargetCodeGenInfo::emitTargetGlobals(
    CodeGen::CodeGenModule &CGM) const {
  StringRef Name = "__oclc_ABI_version";
  llvm::GlobalVariable *OriginalGV = CGM.getModule().getNamedGlobal(Name);
  if (OriginalGV && !llvm::GlobalVariable::isExternalLinkage(OriginalGV->getLinkage()))
    return;

  if (CGM.getTarget().getTargetOpts().CodeObjectVersion ==
      llvm::CodeObjectVersionKind::COV_None)
    return;

  auto *Type = llvm::IntegerType::getIntNTy(CGM.getModule().getContext(), 32);
  llvm::Constant *COV = llvm::ConstantInt::get(
      Type, CGM.getTarget().getTargetOpts().CodeObjectVersion);

  // It needs to be constant weak_odr without externally_initialized so that
  // the load can be eliminated by IPSCCP.
  auto *GV = new llvm::GlobalVariable(
      CGM.getModule(), Type, true, llvm::GlobalValue::WeakODRLinkage, COV, Name,
      nullptr, llvm::GlobalValue::ThreadLocalMode::NotThreadLocal,
      CGM.getContext().getTargetAddressSpace(LangAS::opencl_constant));
  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Local);
  GV->setVisibility(llvm::GlobalValue::VisibilityTypes::HiddenVisibility);

  // Replace any external references to this variable with the new global.
  if (OriginalGV) {
    OriginalGV->replaceAllUsesWith(GV);
    GV->takeName(OriginalGV);
    OriginalGV->eraseFromParent();
  }
}
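// The resulting control variable looks roughly like the following (the exact
// integer value depends on the selected code object version):
//   @__oclc_ABI_version = weak_odr hidden local_unnamed_addr addrspace(4)
//                         constant i32 500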

void AMDGPUTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (requiresAMDGPUProtectedVisibility(D, GV)) {
    GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
    GV->setDSOLocal(true);
  }

  if (GV->isDeclaration())
    return;

  llvm::Function *F = dyn_cast<llvm::Function>(GV);
  if (!F)
    return;

  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (FD)
    setFunctionDeclAttributes(FD, F, M);

  if (!getABIInfo().getCodeGenOpts().EmitIEEENaNCompliantInsts)
    F->addFnAttr("amdgpu-ieee", "false");
}

unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::AMDGPU_KERNEL;
}

llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
    const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
    QualType QT) const {
  // LLVM constant folding assumes a null pointer has the bit pattern 0. For
  // AMDGPU address spaces where the null value is not 0, emit a null pointer
  // in the generic address space and addrspacecast it to the requested
  // pointer type instead.
  if (CGM.getContext().getTargetNullPointerValue(QT) == 0)
    return llvm::ConstantPointerNull::get(PT);

  auto &Ctx = CGM.getContext();
  auto NPT = llvm::PointerType::get(
      PT->getContext(), Ctx.getTargetAddressSpace(LangAS::opencl_generic));
  return llvm::ConstantExpr::getAddrSpaceCast(
      llvm::ConstantPointerNull::get(NPT), PT);
}
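// Example (illustrative): for address spaces whose null value is not 0 on
// AMDGPU (private and local), a null pointer constant is emitted as
//   addrspacecast (ptr null to ptr addrspace(5))
// rather than a literal all-zero pointer in that address space.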

LangAS
AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                  const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  LangAS DefaultGlobalAS = getLangASFromTargetAS(
      CGM.getContext().getTargetAddressSpace(LangAS::opencl_global));
  if (!D)
    return DefaultGlobalAS;

  LangAS AddrSpace = D->getType().getAddressSpace();
  if (AddrSpace != LangAS::Default)
    return AddrSpace;

  // Only promote to the constant address space if the variable has constant
  // initialization.
  if (D->getType().isConstantStorage(CGM.getContext(), false, false) &&
      D->hasConstantInitialization()) {
    if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
      return *ConstAS;
  }
  return DefaultGlobalAS;
}

llvm::SyncScope::ID
AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
                                            SyncScope Scope,
                                            llvm::AtomicOrdering Ordering,
                                            llvm::LLVMContext &Ctx) const {
  std::string Name;
  switch (Scope) {
  case SyncScope::HIPSingleThread:
  case SyncScope::SingleScope:
    Name = "singlethread";
    break;
  case SyncScope::HIPWavefront:
  case SyncScope::OpenCLSubGroup:
  case SyncScope::WavefrontScope:
    Name = "wavefront";
    break;
  case SyncScope::HIPWorkgroup:
  case SyncScope::OpenCLWorkGroup:
  case SyncScope::WorkgroupScope:
    Name = "workgroup";
    break;
  case SyncScope::HIPAgent:
  case SyncScope::OpenCLDevice:
  case SyncScope::DeviceScope:
    Name = "agent";
    break;
  case SyncScope::SystemScope:
  case SyncScope::HIPSystem:
  case SyncScope::OpenCLAllSVMDevices:
    Name = "";
    break;
  }

  // OpenCL assumes by default that atomic scopes are per-address-space for
  // non-sequentially-consistent operations.
  if (Scope >= SyncScope::OpenCLWorkGroup &&
      Scope <= SyncScope::OpenCLSubGroup &&
      Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
    if (!Name.empty())
      Name = Twine(Twine(Name) + Twine("-")).str();

    Name = Twine(Twine(Name) + Twine("one-as")).str();
  }

  return Ctx.getOrInsertSyncScopeID(Name);
}
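// Example mappings (illustrative): an OpenCL work-group scoped atomic with
// relaxed ordering maps to the "workgroup-one-as" sync scope, while the same
// atomic with seq_cst ordering maps to plain "workgroup"; HIP system scope
// maps to the default (empty) LLVM sync scope.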

void AMDGPUTargetCodeGenInfo::setTargetAtomicMetadata(
    CodeGenFunction &CGF, llvm::Instruction &AtomicInst,
    const AtomicExpr *AE) const {
  auto *RMW = dyn_cast<llvm::AtomicRMWInst>(&AtomicInst);
  auto *CmpX = dyn_cast<llvm::AtomicCmpXchgInst>(&AtomicInst);

  // OpenCL and old-style HIP atomics consider atomics targeting thread
  // private memory to be undefined, so flat atomics can be annotated as never
  // touching the private address space.
  if (((RMW && RMW->getPointerAddressSpace() == llvm::AMDGPUAS::FLAT_ADDRESS) ||
       (CmpX &&
        CmpX->getPointerAddressSpace() == llvm::AMDGPUAS::FLAT_ADDRESS)) &&
      AE && AE->threadPrivateMemoryAtomicsAreUndefined()) {
    llvm::MDBuilder MDHelper(CGF.getLLVMContext());
    llvm::MDNode *ASRange = MDHelper.createRange(
        llvm::APInt(32, llvm::AMDGPUAS::PRIVATE_ADDRESS),
        llvm::APInt(32, llvm::AMDGPUAS::PRIVATE_ADDRESS + 1));
    AtomicInst.setMetadata(llvm::LLVMContext::MD_noalias_addrspace, ASRange);
  }

  if (!RMW || !CGF.getTarget().allowAMDGPUUnsafeFPAtomics())
    return;

  // With unsafe FP atomics allowed, annotate FP atomicrmw operations so the
  // backend may select native atomic instructions.
  llvm::AtomicRMWInst::BinOp RMWOp = RMW->getOperation();
  if (llvm::AtomicRMWInst::isFPOperation(RMWOp)) {
    llvm::MDNode *Empty = llvm::MDNode::get(CGF.getLLVMContext(), {});
    RMW->setMetadata("amdgpu.no.fine.grained.memory", Empty);

    if (RMWOp == llvm::AtomicRMWInst::FAdd && RMW->getType()->isFloatTy())
      RMW->setMetadata("amdgpu.ignore.denormal.mode", Empty);
  }
}
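// Example (illustrative): with unsafe FP atomics enabled, a float
// atomicrmw fadd ends up tagged roughly as
//   %old = atomicrmw fadd ptr %p, float %v monotonic,
//          !amdgpu.no.fine.grained.memory !0, !amdgpu.ignore.denormal.mode !0
// which lets the backend emit a hardware atomic instead of a CAS loop.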

bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
  return false;
}

bool AMDGPUTargetCodeGenInfo::shouldEmitDWARFBitFieldSeparators() const {
  return true;
}

void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention(
    const FunctionType *&FT) const {
  FT = getABIInfo().getContext().adjustFunctionType(
      FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
}

/// Create an OpenCL kernel for an enqueued block.
///
/// The first argument is the block literal struct itself (not a pointer to
/// it). The kernel allocates a struct of the same type on the stack, stores
/// the block literal into it, and passes its address to the block invoke
/// function. The kernel carries the "enqueued-block" function attribute and
/// OpenCL kernel argument metadata.
llvm::Value *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
    CodeGenFunction &CGF, llvm::Function *Invoke, llvm::Type *BlockTy) const {
  auto &Builder = CGF.Builder;
  auto &C = CGF.getLLVMContext();

  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
  llvm::SmallVector<llvm::Metadata *, 8> AccessQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgNames;

  ArgTys.push_back(BlockTy);
  ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
  ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
  AccessQuals.push_back(llvm::MDString::get(C, "none"));
  ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
  for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
    ArgTys.push_back(InvokeFT->getParamType(I));
    ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
    AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
    AccessQuals.push_back(llvm::MDString::get(C, "none"));
    ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
    ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
    ArgNames.push_back(
        llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
  }
  std::string Name = Invoke->getName().str() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
                                   &CGF.CGM.getModule());
  F->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);

  llvm::AttrBuilder KernelAttrs(C);
  CGF.CGM.addDefaultFunctionDefinitionAttributes(KernelAttrs);
  KernelAttrs.addAttribute("enqueued-block");
  F->addFnAttrs(KernelAttrs);

  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  Builder.SetInsertPoint(BB);
  const auto BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(BlockTy);
  auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
  BlockPtr->setAlignment(BlockAlign);
  Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
  auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
  llvm::SmallVector<llvm::Value *, 2> Args;
  Args.push_back(Cast);
  for (llvm::Argument &A : llvm::drop_begin(F->args()))
    Args.push_back(&A);
  llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
  call->setCallingConv(Invoke->getCallingConv());
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);

  F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
  F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
  F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
  F->setMetadata("kernel_arg_base_type",
                 llvm::MDNode::get(C, ArgBaseTypeNames));
  F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
  if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));

  return F;
}

void CodeGenModule::handleAMDGPUFlatWorkGroupSizeAttr(
    llvm::Function *F, const AMDGPUFlatWorkGroupSizeAttr *FlatWGS,
    const ReqdWorkGroupSizeAttr *ReqdWGS, int32_t *MinThreadsVal,
    int32_t *MaxThreadsVal) {
  unsigned Min = 0;
  unsigned Max = 0;
  if (FlatWGS) {
    Min = FlatWGS->getMin()->EvaluateKnownConstInt(getContext()).getExtValue();
    Max = FlatWGS->getMax()->EvaluateKnownConstInt(getContext()).getExtValue();
  }
  if (ReqdWGS && Min == 0 && Max == 0)
    Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();

  if (Min != 0) {
    assert(Min <= Max && "Min must be less than or equal Max");

    if (MinThreadsVal)
      *MinThreadsVal = Min;
    if (MaxThreadsVal)
      *MaxThreadsVal = Max;
    std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
    if (F)
      F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
  } else
    assert(Max == 0 && "Max must be zero");
}
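// Example (illustrative): reqd_work_group_size(8, 8, 1) yields Min = Max =
// 8 * 8 * 1 = 64, so the function gets "amdgpu-flat-work-group-size"="64,64";
// an explicit amdgpu_flat_work_group_size(32, 128) yields "32,128".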

void CodeGenModule::handleAMDGPUWavesPerEUAttr(
    llvm::Function *F, const AMDGPUWavesPerEUAttr *Attr) {
  unsigned Min =
      Attr->getMin()->EvaluateKnownConstInt(getContext()).getExtValue();
  unsigned Max =
      Attr->getMax()
          ? Attr->getMax()->EvaluateKnownConstInt(getContext()).getExtValue()
          : 0;

  if (Min != 0) {
    assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");

    std::string AttrVal = llvm::utostr(Min);
    if (Max != 0)
      AttrVal = AttrVal + "," + llvm::utostr(Max);
    F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
  } else
    assert(Max == 0 && "Max must be zero");
}
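// Example (illustrative): amdgpu_waves_per_eu(2) emits
// "amdgpu-waves-per-eu"="2", and amdgpu_waves_per_eu(2, 4) emits
// "amdgpu-waves-per-eu"="2,4".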

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createAMDGPUTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<AMDGPUTargetCodeGenInfo>(CGM.getTypes());
}
