clang: lib/CodeGen/Targets/X86.cpp Source File (original) (raw)

1

2

3

4

5

6

7

8

10#include "TargetInfo.h"

12#include "llvm/ADT/SmallBitVector.h"

13

14using namespace clang;

16

17namespace {

18

19

20bool IsX86_MMXType(llvm::Type *IRType) {

21

22 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&

23 castllvm::VectorType(IRType)->getElementType()->isIntegerTy() &&

24 IRType->getScalarSizeInBits() != 64;

25}

26

28 StringRef Constraint,

29 llvm::Type *Ty) {

30 if (Constraint == "k") {

31 llvm::Type *Int1Ty = llvm::Type::getInt1Ty(CGF.getLLVMContext());

32 return llvm::FixedVectorType::get(Int1Ty, Ty->getScalarSizeInBits());

33 }

34

35

36 return Ty;

37}

38

39

40

41static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {

43 if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {

44 if (BT->getKind() == BuiltinType::LongDouble) {

46 &llvm::APFloat::x87DoubleExtended())

47 return false;

48 }

49 return true;

50 }

52

53

54 unsigned VecSize = Context.getTypeSize(VT);

55 if (VecSize == 128 || VecSize == 256 || VecSize == 512)

56 return true;

57 }

58 return false;

59}

60

61

62

63static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {

64 return NumMembers <= 4;

65}

66

67

68static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {

70 AI.setInReg(true);

71 AI.setCanBeFlattened(false);

72 return AI;

73}

74

75

76

77

78

79

80struct CCState {

82 : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()),

84

85 llvm::SmallBitVector IsPreassigned;

86 unsigned CC = CallingConv::CC_C;

87 unsigned FreeRegs = 0;

88 unsigned FreeSSERegs = 0;

91};

92

93

94class X86_32ABIInfo : public ABIInfo {

98 };

99

100 static const unsigned MinABIStackAlignInBytes = 4;

101

102 bool IsDarwinVectorABI;

103 bool IsRetSmallStructInRegABI;

104 bool IsWin32StructABI;

105 bool IsSoftFloatABI;

106 bool IsMCUABI;

107 bool IsLinuxABI;

108 unsigned DefaultNumRegisterParameters;

109

110 static bool isRegisterSize(unsigned Size) {

111 return (Size == 8 || Size == 16 || Size == 32 || Size == 64);

112 }

113

115

116 return isX86VectorTypeForVectorCall(getContext(), Ty);

117 }

118

120 uint64_t NumMembers) const override {

121

122 return isX86VectorCallAggregateSmallEnough(NumMembers);

123 }

124

125 bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

126

127

128

129 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

130

131 ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;

132

133

134 unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

135

139 unsigned ArgIndex) const;

140

141

142

143 bool updateFreeRegs(QualType Ty, CCState &State) const;

144

145 bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,

146 bool &NeedsPadding) const;

147 bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

148

149 bool canExpandIndirectArgument(QualType Ty) const;

150

151

152

154

158 void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;

159

160public:

161

165

167 bool RetSmallStructInRegABI, bool Win32StructABI,

168 unsigned NumRegisterParameters, bool SoftFloatABI)

169 : ABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),

170 IsRetSmallStructInRegABI(RetSmallStructInRegABI),

171 IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),

172 IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),

173 IsLinuxABI(CGT.getTarget().getTriple().isOSLinux() ||

174 CGT.getTarget().getTriple().isOSCygMing()),

175 DefaultNumRegisterParameters(NumRegisterParameters) {}

176};

177

178class X86_32SwiftABIInfo : public SwiftABIInfo {

179public:

180 explicit X86_32SwiftABIInfo(CodeGenTypes &CGT)

182

184 bool AsReturnValue) const override {

185

186

187

188

190 }

191};

192

194public:

196 bool RetSmallStructInRegABI, bool Win32StructABI,

197 unsigned NumRegisterParameters, bool SoftFloatABI)

199 CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,

200 NumRegisterParameters, SoftFloatABI)) {

201 SwiftInfo = std::make_unique<X86_32SwiftABIInfo>(CGT);

202 }

203

204 static bool isStructReturnInRegABI(

205 const llvm::Triple &Triple, const CodeGenOptions &Opts);

206

209

211

213 return 4;

214 }

215

217 llvm::Value *Address) const override;

218

220 StringRef Constraint,

221 llvm::Type* Ty) const override {

222 return X86AdjustInlineAsmType(CGF, Constraint, Ty);

223 }

224

226 std::string &Constraints,

227 std::vector<llvm::Type *> &ResultRegTypes,

228 std::vector<llvm::Type *> &ResultTruncRegTypes,

229 std::vector &ResultRegDests,

230 std::string &AsmString,

231 unsigned NumOutputs) const override;

232

234 return "movl\t%ebp, %ebp"

235 "\t\t// marker for objc_retainAutoreleaseReturnValue";

236 }

237};

238

239}

240

241

242

243

244

245

246

247

248

250 unsigned NumNewOuts,

251 std::string &AsmString) {

252 std::string Buf;

253 llvm::raw_string_ostream OS(Buf);

254 size_t Pos = 0;

255 while (Pos < AsmString.size()) {

256 size_t DollarStart = AsmString.find('$', Pos);

257 if (DollarStart == std:🧵:npos)

258 DollarStart = AsmString.size();

259 size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);

260 if (DollarEnd == std:🧵:npos)

261 DollarEnd = AsmString.size();

262 OS << StringRef(&AsmString[Pos], DollarEnd - Pos);

263 Pos = DollarEnd;

264 size_t NumDollars = DollarEnd - DollarStart;

265 if (NumDollars % 2 != 0 && Pos < AsmString.size()) {

266

267 size_t DigitStart = Pos;

268 if (AsmString[DigitStart] == '{') {

269 OS << '{';

270 ++DigitStart;

271 }

272 size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);

273 if (DigitEnd == std:🧵:npos)

274 DigitEnd = AsmString.size();

275 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);

276 unsigned OperandIndex;

277 if (!OperandStr.getAsInteger(10, OperandIndex)) {

278 if (OperandIndex >= FirstIn)

279 OperandIndex += NumNewOuts;

280 OS << OperandIndex;

281 } else {

282 OS << OperandStr;

283 }

284 Pos = DigitEnd;

285 }

286 }

287 AsmString = std::move(Buf);

288}

289

290

291void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(

293 std::vector<llvm::Type *> &ResultRegTypes,

294 std::vector<llvm::Type *> &ResultTruncRegTypes,

295 std::vector &ResultRegDests, std::string &AsmString,

296 unsigned NumOutputs) const {

298

299

300

301 if (!Constraints.empty())

302 Constraints += ',';

303 if (RetWidth <= 32) {

304 Constraints += "={eax}";

305 ResultRegTypes.push_back(CGF.Int32Ty);

306 } else {

307

308 Constraints += "=A";

309 ResultRegTypes.push_back(CGF.Int64Ty);

310 }

311

312

313 llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);

314 ResultTruncRegTypes.push_back(CoerceTy);

315

316

318 ResultRegDests.push_back(ReturnSlot);

319

321}

322

323

324

325bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,

328

329

330

331 if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))

332 return false;

333

335

336

337 if (Size == 64 || Size == 128)

338 return false;

339

340 return true;

341 }

342

343

344

348 return true;

349

350

352 return shouldReturnTypeInRegister(AT->getElementType(), Context);

353

354

356 if (!RT) return false;

357

358

359

360

361

363

365 continue;

366

367

368 if (!shouldReturnTypeInRegister(FD->getType(), Context))

369 return false;

370 }

371 return true;

372}

373

375

377 Ty = CTy->getElementType();

378

379

380

381

384 return false;

385

387 return Size == 32 || Size == 64;

388}

389

391 uint64_t &Size) {

392 for (const auto *FD : RD->fields()) {

393

394

395

397 return false;

398

399

400

401

402 if (FD->isBitField())

403 return false;

404

405 Size += Context.getTypeSize(FD->getType());

406 }

407 return true;

408}

409

411 uint64_t &Size) {

412

415 Size))

416 return false;

417 }

419 return false;

420 return true;

421}

422

423

424

425

426

427bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {

428

430 if (!RT)

431 return false;

434 if (const CXXRecordDecl *CXXRD = dyn_cast(RD)) {

435 if (!IsWin32StructABI) {

436

437

438 if (!CXXRD->isCLike())

439 return false;

440 } else {

441

442 if (CXXRD->isDynamicClass())

443 return false;

444 }

446 return false;

447 } else {

449 return false;

450 }

451

452

453 return Size == getContext().getTypeSize(Ty);

454}

455

456ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const {

457

458

459 if (State.CC != llvm::CallingConv::X86_FastCall &&

460 State.CC != llvm::CallingConv::X86_VectorCall && State.FreeRegs) {

461 --State.FreeRegs;

462 if (!IsMCUABI)

463 return getNaturalAlignIndirectInReg(RetTy);

464 }

465 return getNaturalAlignIndirect(RetTy, false);

466}

467

469 CCState &State) const {

472

475 if ((State.CC == llvm::CallingConv::X86_VectorCall ||

476 State.CC == llvm::CallingConv::X86_RegCall) &&

477 isHomogeneousAggregate(RetTy, Base, NumElts)) {

478

480 }

481

483

484 if (IsDarwinVectorABI) {

485 uint64_t Size = getContext().getTypeSize(RetTy);

486

487

488

489

490 if (Size == 128)

492 llvm::Type::getInt64Ty(getVMContext()), 2));

493

494

495

496 if ((Size == 8 || Size == 16 || Size == 32) ||

497 (Size == 64 && VT->getNumElements() == 1))

499 Size));

500

501 return getIndirectReturnResult(RetTy, State);

502 }

503

505 }

506

509

511 return getIndirectReturnResult(RetTy, State);

512 }

513

514

516 return getIndirectReturnResult(RetTy, State);

517

518

521

522

527 llvm::Type::getHalfTy(getVMContext()), 2));

528 }

529

530

531

532 if (shouldReturnTypeInRegister(RetTy, getContext())) {

533 uint64_t Size = getContext().getTypeSize(RetTy);

534

535

536

537

538

539

541 if ((!IsWin32StructABI && SeltTy->isRealFloatingType())

542 || SeltTy->hasPointerRepresentation())

544

545

546

548 }

549

550 return getIndirectReturnResult(RetTy, State);

551 }

552

553

555 RetTy = EnumTy->getDecl()->getIntegerType();

556

558 if (EIT->getNumBits() > 64)

559 return getIndirectReturnResult(RetTy, State);

560

563}

564

565unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,

566 unsigned Align) const {

567

568

569 if (Align <= MinABIStackAlignInBytes)

570 return 0;

571

572 if (IsLinuxABI) {

573

574

575

576

577 if (Ty->isVectorType() && (Align == 16 || Align == 32 || Align == 64))

578 return Align;

579 }

580

581 if (!IsDarwinVectorABI) {

582

583 return MinABIStackAlignInBytes;

584 }

585

586

589 return 16;

590

591 return MinABIStackAlignInBytes;

592}

593

595 CCState &State) const {

596 if (!ByVal) {

597 if (State.FreeRegs) {

598 --State.FreeRegs;

599 if (!IsMCUABI)

600 return getNaturalAlignIndirectInReg(Ty);

601 }

602 return getNaturalAlignIndirect(Ty, false);

603 }

604

605

606 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;

607 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);

608 if (StackAlign == 0)

610

611

612

613 bool Realign = TypeAlign > StackAlign;

615 true, Realign);

616}

617

618X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {

620 if (T)

622

625 if (K == BuiltinType::Float || K == BuiltinType::Double)

627 }

629}

630

631bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {

632 if (!IsSoftFloatABI) {

633 Class C = classify(Ty);

635 return false;

636 }

637

638 unsigned Size = getContext().getTypeSize(Ty);

639 unsigned SizeInRegs = (Size + 31) / 32;

640

641 if (SizeInRegs == 0)

642 return false;

643

644 if (!IsMCUABI) {

645 if (SizeInRegs > State.FreeRegs) {

646 State.FreeRegs = 0;

647 return false;

648 }

649 } else {

650

651

652

653

654 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)

655 return false;

656 }

657

658 State.FreeRegs -= SizeInRegs;

659 return true;

660}

661

662bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,

663 bool &InReg,

664 bool &NeedsPadding) const {

665

666

667

669 return false;

670

671 NeedsPadding = false;

672 InReg = !IsMCUABI;

673

674 if (!updateFreeRegs(Ty, State))

675 return false;

676

677 if (IsMCUABI)

678 return true;

679

680 if (State.CC == llvm::CallingConv::X86_FastCall ||

681 State.CC == llvm::CallingConv::X86_VectorCall ||

682 State.CC == llvm::CallingConv::X86_RegCall) {

683 if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)

684 NeedsPadding = true;

685

686 return false;

687 }

688

689 return true;

690}

691

692bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {

693 bool IsPtrOrInt = (getContext().getTypeSize(Ty) <= 32) &&

696

697 if (!IsPtrOrInt && (State.CC == llvm::CallingConv::X86_FastCall ||

698 State.CC == llvm::CallingConv::X86_VectorCall))

699 return false;

700

701 if (!updateFreeRegs(Ty, State))

702 return false;

703

704 if (!IsPtrOrInt && State.CC == llvm::CallingConv::X86_RegCall)

705 return false;

706

707

708 return !IsMCUABI;

709}

710

711void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const {

712

713

714

715

716

717

718

719

720

722 for (int I = 0, E = Args.size(); I < E; ++I) {

725 const QualType &Ty = Args[I].type;

727 isHomogeneousAggregate(Ty, Base, NumElts)) {

728 if (State.FreeSSERegs >= NumElts) {

729 State.FreeSSERegs -= NumElts;

731 State.IsPreassigned.set(I);

732 }

733 }

734 }

735}

736

737ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State,

738 unsigned ArgIndex) const {

739

740 bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;

741 bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;

742 bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;

743

745 TypeInfo TI = getContext().getTypeInfo(Ty);

746

747

749 if (RT) {

752 return getIndirectResult(Ty, false, State);

753 } else if (State.IsDelegateCall) {

754

755

756 ABIArgInfo Res = getIndirectResult(Ty, false, State);

758 return Res;

760

762 }

763 }

764

765

766

769 if ((IsRegCall || IsVectorCall) &&

770 isHomogeneousAggregate(Ty, Base, NumElts)) {

771 if (State.FreeSSERegs >= NumElts) {

772 State.FreeSSERegs -= NumElts;

773

774

775

776 if (IsVectorCall)

777 return getDirectX86Hva();

778

782 }

785 return getIndirectResult(Ty, false, State);

786 }

787

789

790

792 return getIndirectResult(Ty, true, State);

793

794

795 if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))

797

798

799 if (TI.Width == 0)

801

802 llvm::LLVMContext &LLVMContext = getVMContext();

803 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);

804 bool NeedsPadding = false;

805 bool InReg;

806 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {

807 unsigned SizeInRegs = (TI.Width + 31) / 32;

809 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);

810 if (InReg)

812 else

814 }

815 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

816

817

818

819

820

821

822 if (IsWin32StructABI && State.Required.isRequiredArg(ArgIndex)) {

823 unsigned AlignInBits = 0;

824 if (RT) {

826 getContext().getASTRecordLayout(RT->getDecl());

829 AlignInBits = TI.Align;

830 }

831 if (AlignInBits > 32)

832 return getIndirectResult(Ty, false, State);

833 }

834

835

836

837

838

839

840

841 if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&

842 canExpandIndirectArgument(Ty))

844 IsFastCall || IsVectorCall || IsRegCall, PaddingType);

845

846 return getIndirectResult(Ty, true, State);

847 }

848

850

851

852

853 if (IsWin32StructABI) {

854 if (TI.Width <= 512 && State.FreeSSERegs > 0) {

855 --State.FreeSSERegs;

857 }

858 return getIndirectResult(Ty, false, State);

859 }

860

861

862

863 if (IsDarwinVectorABI) {

864 if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) ||

865 (TI.Width == 64 && VT->getNumElements() == 1))

867 llvm::IntegerType::get(getVMContext(), TI.Width));

868 }

869

870 if (IsX86_MMXType(CGT.ConvertType(Ty)))

872

874 }

875

876

878 Ty = EnumTy->getDecl()->getIntegerType();

879

880 bool InReg = shouldPrimitiveUseInReg(Ty, State);

881

882 if (isPromotableIntegerTypeForABI(Ty)) {

883 if (InReg)

886 }

887

889 if (EIT->getNumBits() <= 64) {

890 if (InReg)

893 }

894 return getIndirectResult(Ty, false, State);

895 }

896

897 if (InReg)

900}

901

902void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {

903 CCState State(FI);

904 if (IsMCUABI)

905 State.FreeRegs = 3;

906 else if (State.CC == llvm::CallingConv::X86_FastCall) {

907 State.FreeRegs = 2;

908 State.FreeSSERegs = 3;

909 } else if (State.CC == llvm::CallingConv::X86_VectorCall) {

910 State.FreeRegs = 2;

911 State.FreeSSERegs = 6;

914 else if (State.CC == llvm::CallingConv::X86_RegCall) {

915 State.FreeRegs = 5;

916 State.FreeSSERegs = 8;

917 } else if (IsWin32StructABI) {

918

919

920 State.FreeRegs = DefaultNumRegisterParameters;

921 State.FreeSSERegs = 3;

922 } else

923 State.FreeRegs = DefaultNumRegisterParameters;

924

928

929

930 if (State.FreeRegs) {

931 --State.FreeRegs;

932 if (!IsMCUABI)

934 }

935 }

936

937

939 ++State.FreeRegs;

940

941

942

943 if (State.CC == llvm::CallingConv::X86_VectorCall)

944 runVectorCallFirstPass(FI, State);

945

946 bool UsedInAlloca = false;

948 for (unsigned I = 0, E = Args.size(); I < E; ++I) {

949

950 if (State.IsPreassigned.test(I))

951 continue;

952

953 Args[I].info =

956 }

957

958

959

960 if (UsedInAlloca)

961 rewriteWithInAlloca(FI);

962}

963

964void

968

970 assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");

971

972

973

974

975 bool IsIndirect = false;

977 IsIndirect = true;

979 llvm::Type *LLTy = CGT.ConvertTypeForMem(Type);

980 if (IsIndirect)

981 LLTy = llvm::PointerType::getUnqual(getVMContext());

982 FrameFields.push_back(LLTy);

983 StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type);

984

985

986 CharUnits FieldEnd = StackOffset;

987 StackOffset = FieldEnd.alignTo(WordSize);

988 if (StackOffset != FieldEnd) {

989 CharUnits NumBytes = StackOffset - FieldEnd;

990 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());

991 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());

992 FrameFields.push_back(Ty);

993 }

994}

995

997

998 switch (Info.getKind()) {

1000 return true;

1003 return false;

1010

1011

1012 return true;

1013 }

1014 llvm_unreachable("invalid enum");

1015}

1016

1017void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {

1018 assert(IsWin32StructABI && "inalloca only supported on win32");

1019

1020

1022

1023

1025

1028

1029

1030 bool IsThisCall =

1033 if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&

1035 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);

1036 ++I;

1037 }

1038

1039

1040 if (Ret.isIndirect() && Ret.getInReg()) {

1041 addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType());

1042

1043 Ret.setInAllocaSRet(IsWin32StructABI);

1044 }

1045

1046

1047 if (IsThisCall)

1048 ++I;

1049

1050

1051 for (; I != E; ++I) {

1053 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);

1054 }

1055

1056 FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,

1057 true),

1058 StackAlign);

1059}

1060

1063

1064 auto TypeInfo = getContext().getTypeInfoInChars(Ty);

1065

1068

1071

1072

1073

1074

1075

1077 getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));

1078

1081 true, Slot);

1082}

1083

1084bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(

1085 const llvm::Triple &Triple, const CodeGenOptions &Opts) {

1086 assert(Triple.getArch() == llvm::Triple::x86);

1087

1088 switch (Opts.getStructReturnConvention()) {

1090 break;

1092 return false;

1094 return true;

1095 }

1096

1097 if (Triple.isOSDarwin() || Triple.isOSIAMCU())

1098 return true;

1099

1100 switch (Triple.getOS()) {

1101 case llvm::Triple::DragonFly:

1102 case llvm::Triple::FreeBSD:

1103 case llvm::Triple::OpenBSD:

1104 case llvm::Triple::Win32:

1105 return true;

1106 default:

1107 return false;

1108 }

1109}

1110

1113 if (!FD->hasAttr())

1114 return;

1115

1116 llvm::Function *Fn = castllvm::Function(GV);

1117 Fn->setCallingConv(llvm::CallingConv::X86_INTR);

1119 return;

1120

1122 llvm::Type *ByValTy = CGM.getTypes().ConvertType(PtrTy->getPointeeType());

1123 llvm::Attribute NewAttr = llvm::Attribute::getWithByValType(

1124 Fn->getContext(), ByValTy);

1125 Fn->addParamAttr(0, NewAttr);

1126}

1127

1128void X86_32TargetCodeGenInfo::setTargetAttributes(

1130 if (GV->isDeclaration())

1131 return;

1132 if (const FunctionDecl *FD = dyn_cast_or_null(D)) {

1133 if (FD->hasAttr()) {

1134 llvm::Function *Fn = castllvm::Function(GV);

1135 Fn->addFnAttr("stackrealign");

1136 }

1137

1139 }

1140}

1141

1142bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(

1144 llvm::Value *Address) const {

1146

1147 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

1148

1149

1150

1151

1153

1155

1156

1157

1158 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);

1160

1161 } else {

1162

1163

1164 Builder.CreateAlignedStore(

1165 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),

1167

1168

1169

1170

1171 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);

1173 }

1174

1175 return false;

1176}

1177

1178

1179

1180

1181

1182

1183namespace {

1184

1185

1186static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {

1187 switch (AVXLevel) {

1188 case X86AVXABILevel::AVX512:

1189 return 512;

1190 case X86AVXABILevel::AVX:

1191 return 256;

1192 case X86AVXABILevel::None:

1193 return 128;

1194 }

1195 llvm_unreachable("Unknown AVXLevel");

1196}

1197

1198

1199class X86_64ABIInfo : public ABIInfo {

1202 SSE,

1203 SSEUp,

1204 X87,

1205 X87Up,

1206 ComplexX87,

1207 NoClass,

1208 Memory

1209 };

1210

1211

1212

1213

1214

1215

1216

1217

1218

1219

1220 static Class merge(Class Accum, Class Field);

1221

1222

1223

1224

1225

1226

1227

1228

1229

1230

1231

1232

1233

1234

1235

1236 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

1237

1238

1239

1240

1241

1242

1243

1244

1245

1246

1247

1248

1249

1250

1251

1252

1253

1254

1255

1256

1257

1258

1259

1260

1261

1262

1263

1264 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,

1265 bool isNamedArg, bool IsRegCall = false) const;

1266

1267 llvm::Type *GetByteVectorType(QualType Ty) const;

1268 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,

1269 unsigned IROffset, QualType SourceTy,

1270 unsigned SourceOffset) const;

1271 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,

1272 unsigned IROffset, QualType SourceTy,

1273 unsigned SourceOffset) const;

1274

1275

1276

1278

1279

1280

1281

1282

1283

1284 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

1285

1287

1289 unsigned &neededInt, unsigned &neededSSE,

1290 bool isNamedArg,

1291 bool IsRegCall = false) const;

1292

1293 ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,

1294 unsigned &NeededSSE,

1295 unsigned &MaxVectorWidth) const;

1296

1297 ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,

1298 unsigned &NeededSSE,

1299 unsigned &MaxVectorWidth) const;

1300

1301 bool IsIllegalVectorType(QualType Ty) const;

1302

1303

1304

1305

1306

1307

1308 bool honorsRevision0_98() const {

1310 }

1311

1312

1313

1314 bool classifyIntegerMMXAsSSE() const {

1315

1316 if (getContext().getLangOpts().getClangABICompat() <=

1317 LangOptions::ClangABI::Ver3_8)

1318 return false;

1319

1321 if (Triple.isOSDarwin() || Triple.isPS() || Triple.isOSFreeBSD())

1322 return false;

1323 return true;

1324 }

1325

1326

1327 bool passInt128VectorsInMem() const {

1328

1329 if (getContext().getLangOpts().getClangABICompat() <=

1330 LangOptions::ClangABI::Ver9)

1331 return false;

1332

1334 return T.isOSLinux() || T.isOSNetBSD();

1335 }

1336

1338

1339

1340 bool Has64BitPointers;

1341

1342public:

1344 : ABIInfo(CGT), AVXLevel(AVXLevel),

1345 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {}

1346

1347 bool isPassedUsingAVXType(QualType type) const {

1348 unsigned neededInt, neededSSE;

1349

1351 true);

1354 if (llvm::VectorType *vectorTy = dyn_cast_or_nullllvm::VectorType(ty))

1355 return vectorTy->getPrimitiveSizeInBits().getFixedValue() > 128;

1356 }

1357 return false;

1358 }

1359

1361

1366

1367 bool has64BitPointers() const {

1368 return Has64BitPointers;

1369 }

1370};

1371

1372

1373class WinX86_64ABIInfo : public ABIInfo {

1374public:

1376 : ABIInfo(CGT), AVXLevel(AVXLevel),

1377 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}

1378

1380

1383

1385

1386 return isX86VectorTypeForVectorCall(getContext(), Ty);

1387 }

1388

1390 uint64_t NumMembers) const override {

1391

1392 return isX86VectorCallAggregateSmallEnough(NumMembers);

1393 }

1394

1395private:

1396 ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,

1397 bool IsVectorCall, bool IsRegCall) const;

1398 ABIArgInfo reclassifyHvaArgForVectorCall(QualType Ty, unsigned &FreeSSERegs,

1400

1402

1403 bool IsMingw64;

1404};

1405

1407public:

1410 SwiftInfo =

1411 std::make_unique(CGT, true);

1412 }

1413

1414

1415

1417

1419 return 7;

1420 }

1421

1423 llvm::Value *Address) const override {

1424 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

1425

1426

1427

1429 return false;

1430 }

1431

1433 StringRef Constraint,

1434 llvm::Type* Ty) const override {

1435 return X86AdjustInlineAsmType(CGF, Constraint, Ty);

1436 }

1437

1440

1441

1442

1443

1444

1445

1447 bool HasAVXType = false;

1448 for (CallArgList::const_iterator

1449 it = args.begin(), ie = args.end(); it != ie; ++it) {

1450 if (getABIInfo<X86_64ABIInfo>().isPassedUsingAVXType(it->Ty)) {

1451 HasAVXType = true;

1452 break;

1453 }

1454 }

1455

1456 if (!HasAVXType)

1457 return true;

1458 }

1459

1461 }

1462

1465 if (GV->isDeclaration())

1466 return;

1467 if (const FunctionDecl *FD = dyn_cast_or_null(D)) {

1468 if (FD->hasAttr()) {

1469 llvm::Function *Fn = castllvm::Function(GV);

1470 Fn->addFnAttr("stackrealign");

1471 }

1472

1474 }

1475 }

1476

1480 QualType ReturnType) const override;

1481};

1482}

1483

1485 llvm::StringMap &CallerMap,

1487 llvm::StringMap &CalleeMap,

1489 if (CalleeMap.empty() && CallerMap.empty()) {

1490

1491

1492

1495 }

1496}

1497

1500 const llvm::StringMap &CallerMap,

1501 const llvm::StringMap &CalleeMap,

1502 QualType Ty, StringRef Feature,

1503 bool IsArgument) {

1504 bool CallerHasFeat = CallerMap.lookup(Feature);

1505 bool CalleeHasFeat = CalleeMap.lookup(Feature);

1506 if (!CallerHasFeat && !CalleeHasFeat)

1507 return Diag.Report(CallLoc, diag::warn_avx_calling_convention)

1508 << IsArgument << Ty << Feature;

1509

1510

1511 if (!CallerHasFeat || !CalleeHasFeat)

1512 return Diag.Report(CallLoc, diag::err_avx_calling_convention)

1513 << IsArgument << Ty << Feature;

1514

1515

1516

1517 return false;

1518}

1519

1522 const llvm::StringMap &CallerMap,

1523 const llvm::StringMap &CalleeMap,

1524 QualType Ty, bool IsArgument) {

1525 bool Caller256 = CallerMap.lookup("avx512f") && !CallerMap.lookup("evex512");

1526 bool Callee256 = CalleeMap.lookup("avx512f") && !CalleeMap.lookup("evex512");

1527

1528

1529

1530 if (Caller256 || Callee256)

1531 return Diag.Report(CallLoc, diag::err_avx_calling_convention)

1532 << IsArgument << Ty << "evex512";

1533

1535 "avx512f", IsArgument);

1536}

1537

1540 const llvm::StringMap &CallerMap,

1541 const llvm::StringMap &CalleeMap, QualType Ty,

1542 bool IsArgument) {

1544 if (Size > 256)

1546 IsArgument);

1547

1548 if (Size > 128)

1550 IsArgument);

1551

1552 return false;

1553}

1554

1555void X86_64TargetCodeGenInfo::checkFunctionCallABI(CodeGenModule &CGM,

1560 QualType ReturnType) const {

1561 if (!Callee)

1562 return;

1563

1564 llvm::StringMap CallerMap;

1565 llvm::StringMap CalleeMap;

1566 unsigned ArgIndex = 0;

1567

1568

1569

1570 for (const CallArg &Arg : Args) {

1571

1572

1573

1574

1575

1576

1577

1578 if (Arg.getType()->isVectorType() &&

1581 QualType Ty = Arg.getType();

1582

1583

1584 if (ArgIndex < Callee->getNumParams())

1585 Ty = Callee->getParamDecl(ArgIndex)->getType();

1586

1588 CalleeMap, Ty, true))

1589 return;

1590 }

1591 ++ArgIndex;

1592 }

1593

1594

1595

1596 if (Callee->getReturnType()->isVectorType() &&

1600 CalleeMap, Callee->getReturnType(),

1601 false);

1602 }

1603}

1604

1606

1607

1608

1609 bool Quote = Lib.contains(' ');

1610 std::string ArgStr = Quote ? "\"" : "";

1611 ArgStr += Lib;

1612 if (!Lib.ends_with_insensitive(".lib") && !Lib.ends_with_insensitive(".a"))

1613 ArgStr += ".lib";

1614 ArgStr += Quote ? "\"" : "";

1615 return ArgStr;

1616}

1617

1618namespace {

1619class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {

1620public:

1622 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,

1623 unsigned NumRegisterParameters)

1624 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,

1625 Win32StructABI, NumRegisterParameters, false) {}

1626

1627 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,

1629

1630 void getDependentLibraryOption(llvm::StringRef Lib,

1632 Opt = "/DEFAULTLIB:";

1633 Opt += qualifyWindowsLibrary(Lib);

1634 }

1635

1636 void getDetectMismatchOption(llvm::StringRef Name,

1637 llvm::StringRef Value,

1639 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";

1640 }

1641};

1642}

1643

1644void WinX86_32TargetCodeGenInfo::setTargetAttributes(

1646 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);

1647 if (GV->isDeclaration())

1648 return;

1649 addStackProbeTargetAttributes(D, GV, CGM);

1650}

1651

1652namespace {

1654public:

1658 SwiftInfo =

1659 std::make_unique(CGT, true);

1660 }

1661

1662 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,

1664

1666 return 7;

1667 }

1668

1670 llvm::Value *Address) const override {

1671 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

1672

1673

1674

1676 return false;

1677 }

1678

1679 void getDependentLibraryOption(llvm::StringRef Lib,

1681 Opt = "/DEFAULTLIB:";

1682 Opt += qualifyWindowsLibrary(Lib);

1683 }

1684

1685 void getDetectMismatchOption(llvm::StringRef Name,

1686 llvm::StringRef Value,

1688 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";

1689 }

1690};

1691}

1692

1693void WinX86_64TargetCodeGenInfo::setTargetAttributes(

1696 if (GV->isDeclaration())

1697 return;

1698 if (const FunctionDecl *FD = dyn_cast_or_null(D)) {

1699 if (FD->hasAttr()) {

1700 llvm::Function *Fn = castllvm::Function(GV);

1701 Fn->addFnAttr("stackrealign");

1702 }

1703

1705 }

1706

1707 addStackProbeTargetAttributes(D, GV, CGM);

1708}

1709

1710void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,

1711 Class &Hi) const {

1712

1713

1714

1715

1716

1717

1718

1719

1720

1721

1722

1723

1724

1725

1726

1727

1728

1729

1730

1731

1732

1733 if (Hi == Memory)

1734 Lo = Memory;

1735 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())

1736 Lo = Memory;

1737 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))

1738 Lo = Memory;

1739 if (Hi == SSEUp && Lo != SSE)

1740 Hi = SSE;

1741}

1742

1743X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {

1744

1745

1746

1747

1748

1749

1750

1751

1752

1753

1754

1755

1756

1757

1758

1759

1760

1761

1762

1763

1764

1765

1766

1767 assert((Accum != Memory && Accum != ComplexX87) &&

1768 "Invalid accumulated classification during merge.");

1769 if (Accum == Field || Field == NoClass)

1770 return Accum;

1771 if (Field == Memory)

1772 return Memory;

1773 if (Accum == NoClass)

1777 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||

1778 Accum == X87 || Accum == X87Up)

1779 return Memory;

1780 return SSE;

1781}

1782

1783void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,

1784 Class &Hi, bool isNamedArg, bool IsRegCall) const {

1785

1786

1787

1788

1789

1790

1791

1792

1793 Lo = Hi = NoClass;

1794

1795 Class &Current = OffsetBase < 64 ? Lo : Hi;

1796 Current = Memory;

1797

1800

1801 if (k == BuiltinType::Void) {

1802 Current = NoClass;

1803 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {

1806 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {

1808 } else if (k == BuiltinType::Float || k == BuiltinType::Double ||

1809 k == BuiltinType::Float16 || k == BuiltinType::BFloat16) {

1810 Current = SSE;

1811 } else if (k == BuiltinType::Float128) {

1812 Lo = SSE;

1813 Hi = SSEUp;

1814 } else if (k == BuiltinType::LongDouble) {

1815 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();

1816 if (LDF == &llvm::APFloat::IEEEquad()) {

1817 Lo = SSE;

1818 Hi = SSEUp;

1819 } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {

1820 Lo = X87;

1821 Hi = X87Up;

1822 } else if (LDF == &llvm::APFloat::IEEEdouble()) {

1823 Current = SSE;

1824 } else

1825 llvm_unreachable("unexpected long double representation!");

1826 }

1827

1828

1829 return;

1830 }

1831

1833

1834 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);

1835 return;

1836 }

1837

1840 return;

1841 }

1842

1845 if (Has64BitPointers) {

1846

1847

1849 } else {

1850

1851

1852 uint64_t EB_FuncPtr = (OffsetBase) / 64;

1853 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;

1854 if (EB_FuncPtr != EB_ThisAdj) {

1856 } else {

1858 }

1859 }

1860 } else {

1862 }

1863 return;

1864 }

1865

1867 uint64_t Size = getContext().getTypeSize(VT);

1868 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {

1869

1870

1871

1872

1874

1875

1876

1877 uint64_t EB_Lo = (OffsetBase) / 64;

1878 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;

1879 if (EB_Lo != EB_Hi)

1880 Hi = Lo;

1881 } else if (Size == 64) {

1882 QualType ElementType = VT->getElementType();

1883

1884

1886 return;

1887

1888

1889

1890

1891 if (!classifyIntegerMMXAsSSE() &&

1897 else

1898 Current = SSE;

1899

1900

1901

1902 if (OffsetBase && OffsetBase != 64)

1903 Hi = Lo;

1904 } else if (Size == 128 ||

1905 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {

1906 QualType ElementType = VT->getElementType();

1907

1908

1909 if (passInt128VectorsInMem() && Size != 128 &&

1912 return;

1913

1914

1915

1916

1917

1918

1919

1920

1921

1922

1923

1924

1925

1926

1927

1928 Lo = SSE;

1929 Hi = SSEUp;

1930 }

1931 return;

1932 }

1933

1936

1937 uint64_t Size = getContext().getTypeSize(Ty);

1939 if (Size <= 64)

1941 else if (Size <= 128)

1943 } else if (ET->isFloat16Type() || ET == getContext().FloatTy ||

1945 Current = SSE;

1946 } else if (ET == getContext().DoubleTy) {

1947 Lo = Hi = SSE;

1948 } else if (ET == getContext().LongDoubleTy) {

1949 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();

1950 if (LDF == &llvm::APFloat::IEEEquad())

1951 Current = Memory;

1952 else if (LDF == &llvm::APFloat::x87DoubleExtended())

1953 Current = ComplexX87;

1954 else if (LDF == &llvm::APFloat::IEEEdouble())

1955 Lo = Hi = SSE;

1956 else

1957 llvm_unreachable("unexpected long double representation!");

1958 }

1959

1960

1961

1962 uint64_t EB_Real = (OffsetBase) / 64;

1963 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;

1964 if (Hi == NoClass && EB_Real != EB_Imag)

1965 Hi = Lo;

1966

1967 return;

1968 }

1969

1971 if (EITy->getNumBits() <= 64)

1973 else if (EITy->getNumBits() <= 128)

1975

1976 return;

1977 }

1978

1979 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {

1980

1981

1982 uint64_t Size = getContext().getTypeSize(Ty);

1983

1984

1985

1986

1987

1988 if (!IsRegCall && Size > 512)

1989 return;

1990

1991

1992

1993

1994

1995 if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))

1996 return;

1997

1998

1999

2000 Current = NoClass;

2001 uint64_t EltSize = getContext().getTypeSize(AT->getElementType());

2002 uint64_t ArraySize = AT->getZExtSize();

2003

2004

2005

2006

2007

2008 if (Size > 128 &&

2009 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))

2010 return;

2011

2012 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {

2013 Class FieldLo, FieldHi;

2014 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);

2015 Lo = merge(Lo, FieldLo);

2016 Hi = merge(Hi, FieldHi);

2017 if (Lo == Memory || Hi == Memory)

2018 break;

2019 }

2020

2021 postMerge(Size, Lo, Hi);

2022 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");

2023 return;

2024 }

2025

2027 uint64_t Size = getContext().getTypeSize(Ty);

2028

2029

2030

2031 if (Size > 512)

2032 return;

2033

2034

2035

2036

2038 return;

2039

2041

2042

2044 return;

2045

2046 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

2047

2048

2049 Current = NoClass;

2050

2051

2052 if (const CXXRecordDecl *CXXRD = dyn_cast(RD)) {

2053 for (const auto &I : CXXRD->bases()) {

2054 assert(!I.isVirtual() && !I.getType()->isDependentType() &&

2055 "Unexpected base class!");

2056 const auto *Base =

2057 cast(I.getType()->castAs<RecordType>()->getDecl());

2058

2059

2060

2061

2062

2063

2064 Class FieldLo, FieldHi;

2067 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);

2068 Lo = merge(Lo, FieldLo);

2069 Hi = merge(Hi, FieldHi);

2070 if (Lo == Memory || Hi == Memory) {

2071 postMerge(Size, Lo, Hi);

2072 return;

2073 }

2074 }

2075 }

2076

2077

2078 unsigned idx = 0;

2079 bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <=

2081 getContext().getTargetInfo().getTriple().isPS();

2082 bool IsUnion = RT->isUnionType() && !UseClang11Compat;

2083

2085 i != e; ++i, ++idx) {

2087 bool BitField = i->isBitField();

2088

2089

2090 if (BitField && i->isUnnamedBitField())

2091 continue;

2092

2093

2094

2095

2096

2097

2098

2099

2100

2101

2102 if (Size > 128 &&

2103 ((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||

2104 Size > getNativeVectorSizeForAVXABI(AVXLevel))) {

2105 Lo = Memory;

2106 postMerge(Size, Lo, Hi);

2107 return;

2108 }

2109

2110 bool IsInMemory =

2111 Offset % getContext().getTypeAlign(i->getType().getCanonicalType());

2112

2113 if (!BitField && IsInMemory) {

2114 Lo = Memory;

2115 postMerge(Size, Lo, Hi);

2116 return;

2117 }

2118

2119

2120

2121

2122

2123

2124

2125 Class FieldLo, FieldHi;

2126

2127

2128

2129

2130 if (BitField) {

2131 assert(!i->isUnnamedBitField());

2134

2135 uint64_t EB_Lo = Offset / 64;

2137

2138 if (EB_Lo) {

2139 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");

2140 FieldLo = NoClass;

2142 } else {

2144 FieldHi = EB_Hi ? Integer : NoClass;

2145 }

2146 } else

2147 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);

2148 Lo = merge(Lo, FieldLo);

2149 Hi = merge(Hi, FieldHi);

2150 if (Lo == Memory || Hi == Memory)

2151 break;

2152 }

2153

2154 postMerge(Size, Lo, Hi);

2155 }

2156}

2157

2158ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {

2159

2160

2162

2164 Ty = EnumTy->getDecl()->getIntegerType();

2165

2167 return getNaturalAlignIndirect(Ty);

2168

2171 }

2172

2173 return getNaturalAlignIndirect(Ty);

2174}

2175

2176bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {

2178 uint64_t Size = getContext().getTypeSize(VecTy);

2179 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);

2180 if (Size <= 64 || Size > LargestVector)

2181 return true;

2182 QualType EltTy = VecTy->getElementType();

2183 if (passInt128VectorsInMem() &&

2186 return true;

2187 }

2188

2189 return false;

2190}

2191

2193 unsigned freeIntRegs) const {

2194

2195

2196

2197

2198

2199

2200

2201

2204

2206 Ty = EnumTy->getDecl()->getIntegerType();

2207

2210 }

2211

2214

2215

2216

2217 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);

2218

2219

2220

2221

2222

2223

2224

2225

2226

2227

2228

2229

2230

2231

2232

2233

2234

2235

2236

2237

2238

2239

2240 if (freeIntRegs == 0) {

2241 uint64_t Size = getContext().getTypeSize(Ty);

2242

2243

2244

2245 if (Align == 8 && Size <= 64)

2247 Size));

2248 }

2249

2251}

2252

2253

2254

2255llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {

2256

2257

2260

2261 llvm::Type *IRType = CGT.ConvertType(Ty);

2262 if (isallvm::VectorType(IRType)) {

2263

2264

2265 if (passInt128VectorsInMem() &&

2266 castllvm::VectorType(IRType)->getElementType()->isIntegerTy(128)) {

2267

2268 uint64_t Size = getContext().getTypeSize(Ty);

2269 return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),

2270 Size / 64);

2271 }

2272

2273 return IRType;

2274 }

2275

2276 if (IRType->getTypeID() == llvm::Type::FP128TyID)

2277 return IRType;

2278

2279

2280 uint64_t Size = getContext().getTypeSize(Ty);

2281 assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");

2282

2283

2284

2285 return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),

2286 Size / 64);

2287}

2288

2289

2290

2291

2292

2293

2294

2295

2297 unsigned EndBit, ASTContext &Context) {

2298

2299

2300

2302 if (TySize <= StartBit)

2303 return true;

2304

2307 unsigned NumElts = (unsigned)AT->getZExtSize();

2308

2309

2310 for (unsigned i = 0; i != NumElts; ++i) {

2311

2312 unsigned EltOffset = i*EltSize;

2313 if (EltOffset >= EndBit) break;

2314

2315 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;

2317 EndBit-EltOffset, Context))

2318 return false;

2319 }

2320

2321 return true;

2322 }

2323

2327

2328

2329 if (const CXXRecordDecl *CXXRD = dyn_cast(RD)) {

2330 for (const auto &I : CXXRD->bases()) {

2331 assert(!I.isVirtual() && !I.getType()->isDependentType() &&

2332 "Unexpected base class!");

2333 const auto *Base =

2334 cast(I.getType()->castAs<RecordType>()->getDecl());

2335

2336

2338 if (BaseOffset >= EndBit) continue;

2339

2340 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;

2342 EndBit-BaseOffset, Context))

2343 return false;

2344 }

2345 }

2346

2347

2348

2349

2350

2351 unsigned idx = 0;

2353 i != e; ++i, ++idx) {

2355

2356

2357 if (FieldOffset >= EndBit) break;

2358

2359 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;

2361 Context))

2362 return false;

2363 }

2364

2365

2366

2367 return true;

2368 }

2369

2370 return false;

2371}

2372

2373

2375 const llvm::DataLayout &TD) {

2376 if (IROffset == 0 && IRType->isFloatingPointTy())

2377 return IRType;

2378

2379

2380 if (llvm::StructType *STy = dyn_castllvm::StructType(IRType)) {

2381 if (!STy->getNumContainedTypes())

2382 return nullptr;

2383

2384 const llvm::StructLayout *SL = TD.getStructLayout(STy);

2385 unsigned Elt = SL->getElementContainingOffset(IROffset);

2386 IROffset -= SL->getElementOffset(Elt);

2387 return getFPTypeAtOffset(STy->getElementType(Elt), IROffset, TD);

2388 }

2389

2390

2391 if (llvm::ArrayType *ATy = dyn_castllvm::ArrayType(IRType)) {

2392 llvm::Type *EltTy = ATy->getElementType();

2393 unsigned EltSize = TD.getTypeAllocSize(EltTy);

2394 IROffset -= IROffset / EltSize * EltSize;

2396 }

2397

2398 return nullptr;

2399}

2400

2401

2402

2403llvm::Type *X86_64ABIInfo::

2404GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,

2405 QualType SourceTy, unsigned SourceOffset) const {

2406 const llvm::DataLayout &TD = getDataLayout();

2407 unsigned SourceSize =

2408 (unsigned)getContext().getTypeSize(SourceTy) / 8 - SourceOffset;

2410 if (!T0 || T0->isDoubleTy())

2411 return llvm::Type::getDoubleTy(getVMContext());

2412

2413

2414 llvm::Type *T1 = nullptr;

2415 unsigned T0Size = TD.getTypeAllocSize(T0);

2416 if (SourceSize > T0Size)

2418 if (T1 == nullptr) {

2419

2420

2421 if (T0->is16bitFPTy() && SourceSize > 4)

2423

2424

2425

2426 if (T1 == nullptr)

2427 return T0;

2428 }

2429

2430 if (T0->isFloatTy() && T1->isFloatTy())

2431 return llvm::FixedVectorType::get(T0, 2);

2432

2433 if (T0->is16bitFPTy() && T1->is16bitFPTy()) {

2434 llvm::Type *T2 = nullptr;

2435 if (SourceSize > 4)

2437 if (T2 == nullptr)

2438 return llvm::FixedVectorType::get(T0, 2);

2439 return llvm::FixedVectorType::get(T0, 4);

2440 }

2441

2442 if (T0->is16bitFPTy() || T1->is16bitFPTy())

2443 return llvm::FixedVectorType::get(llvm::Type::getHalfTy(getVMContext()), 4);

2444

2445 return llvm::Type::getDoubleTy(getVMContext());

2446}

2447

2448

2449

2450

2451

2452

2453

2454

2455

2456

2457

2458

2459

2460

2461

2462

2463llvm::Type *X86_64ABIInfo::

2464GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,

2465 QualType SourceTy, unsigned SourceOffset) const {

2466

2467

2468 if (IROffset == 0) {

2469

2470 if ((isallvm::PointerType(IRType) && Has64BitPointers) ||

2471 IRType->isIntegerTy(64))

2472 return IRType;

2473

2474

2475

2476

2477

2478

2479

2480 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||

2481 IRType->isIntegerTy(32) ||

2482 (isallvm::PointerType(IRType) && !Has64BitPointers)) {

2483 unsigned BitWidth = isallvm::PointerType(IRType) ? 32 :

2484 castllvm::IntegerType(IRType)->getBitWidth();

2485

2487 SourceOffset*8+64, getContext()))

2488 return IRType;

2489 }

2490 }

2491

2492 if (llvm::StructType *STy = dyn_castllvm::StructType(IRType)) {

2493

2494 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);

2495 if (IROffset < SL->getSizeInBytes()) {

2496 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);

2497 IROffset -= SL->getElementOffset(FieldIdx);

2498

2499 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,

2500 SourceTy, SourceOffset);

2501 }

2502 }

2503

2504 if (llvm::ArrayType *ATy = dyn_castllvm::ArrayType(IRType)) {

2505 llvm::Type *EltTy = ATy->getElementType();

2506 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);

2507 unsigned EltOffset = IROffset/EltSize*EltSize;

2508 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,

2509 SourceOffset);

2510 }

2511

2512

2513

2514 unsigned TySizeInBytes =

2515 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();

2516

2517 assert(TySizeInBytes != SourceOffset && "Empty field?");

2518

2519

2520

2521 return llvm::IntegerType::get(getVMContext(),

2522 std::min(TySizeInBytes-SourceOffset, 8U)*8);

2523}

2524

2525

2526

2527

2528

2529

2530

2531static llvm::Type *

2533 const llvm::DataLayout &TD) {

2534

2535

2536

2537

2538 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);

2539 llvm::Align HiAlign = TD.getABITypeAlign(Hi);

2540 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);

2541 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

2542

2543

2544

2545

2546

2547 if (HiStart != 8) {

2548

2549

2550

2551

2552

2553 if (Lo->isHalfTy() || Lo->isFloatTy())

2554 Lo = llvm::Type::getDoubleTy(Lo->getContext());

2555 else {

2556 assert((Lo->isIntegerTy() || Lo->isPointerTy())

2557 && "Invalid/unknown lo type");

2558 Lo = llvm::Type::getInt64Ty(Lo->getContext());

2559 }

2560 }

2561

2562 llvm::StructType *Result = llvm::StructType::get(Lo, Hi);

2563

2564

2565 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&

2566 "Invalid x86-64 argument pair!");

2568}

2569

2571classifyReturnType(QualType RetTy) const {

2572

2573

2574 X86_64ABIInfo::Class Lo, Hi;

2575 classify(RetTy, 0, Lo, Hi, true);

2576

2577

2578 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");

2579 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

2580

2581 llvm::Type *ResType = nullptr;

2582 switch (Lo) {

2583 case NoClass:

2584 if (Hi == NoClass)

2586

2587

2588 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&

2589 "Unknown missing lo part");

2590 break;

2591

2592 case SSEUp:

2593 case X87Up:

2594 llvm_unreachable("Invalid classification for lo word.");

2595

2596

2597

2598 case Memory:

2599 return getIndirectReturnResult(RetTy);

2600

2601

2602

2604 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

2605

2606

2607

2608 if (Hi == NoClass && isallvm::IntegerType(ResType)) {

2609

2611 RetTy = EnumTy->getDecl()->getIntegerType();

2612

2614 isPromotableIntegerTypeForABI(RetTy))

2616 }

2617 break;

2618

2619

2620

2621 case SSE:

2622 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

2623 break;

2624

2625

2626

2627 case X87:

2628 ResType = llvm::Type::getX86_FP80Ty(getVMContext());

2629 break;

2630

2631

2632

2633

2634 case ComplexX87:

2635 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");

2636 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),

2637 llvm::Type::getX86_FP80Ty(getVMContext()));

2638 break;

2639 }

2640

2641 llvm::Type *HighPart = nullptr;

2642 switch (Hi) {

2643

2644

2645 case Memory:

2646 case X87:

2647 llvm_unreachable("Invalid classification for hi word.");

2648

2649 case ComplexX87:

2650 case NoClass:

2651 break;

2652

2654 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);

2655 if (Lo == NoClass)

2657 break;

2658 case SSE:

2659 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);

2660 if (Lo == NoClass)

2662 break;

2663

2664

2665

2666

2667

2668

2669 case SSEUp:

2670 assert(Lo == SSE && "Unexpected SSEUp classification.");

2671 ResType = GetByteVectorType(RetTy);

2672 break;

2673

2674

2675

2676 case X87Up:

2677

2678

2679

2680

2681 if (Lo != X87) {

2682 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);

2683 if (Lo == NoClass)

2685 }

2686 break;

2687 }

2688

2689

2690

2691

2692 if (HighPart)

2694

2696}

2697

2699X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned freeIntRegs,

2700 unsigned &neededInt, unsigned &neededSSE,

2701 bool isNamedArg, bool IsRegCall) const {

2703

2704 X86_64ABIInfo::Class Lo, Hi;

2705 classify(Ty, 0, Lo, Hi, isNamedArg, IsRegCall);

2706

2707

2708

2709 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");

2710 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

2711

2712 neededInt = 0;

2713 neededSSE = 0;

2714 llvm::Type *ResType = nullptr;

2715 switch (Lo) {

2716 case NoClass:

2717 if (Hi == NoClass)

2719

2720

2721 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&

2722 "Unknown missing lo part");

2723 break;

2724

2725

2726

2727 case Memory:

2728

2729

2730

2731 case X87:

2732 case ComplexX87:

2734 ++neededInt;

2735 return getIndirectResult(Ty, freeIntRegs);

2736

2737 case SSEUp:

2738 case X87Up:

2739 llvm_unreachable("Invalid classification for lo word.");

2740

2741

2742

2743

2745 ++neededInt;

2746

2747

2748 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);

2749

2750

2751

2752 if (Hi == NoClass && isallvm::IntegerType(ResType)) {

2753

2755 Ty = EnumTy->getDecl()->getIntegerType();

2756

2758 isPromotableIntegerTypeForABI(Ty))

2760 }

2761

2762 break;

2763

2764

2765

2766

2767 case SSE: {

2768 llvm::Type *IRType = CGT.ConvertType(Ty);

2769 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);

2770 ++neededSSE;

2771 break;

2772 }

2773 }

2774

2775 llvm::Type *HighPart = nullptr;

2776 switch (Hi) {

2777

2778

2779

2780 case Memory:

2781 case X87:

2782 case ComplexX87:

2783 llvm_unreachable("Invalid classification for hi word.");

2784

2785 case NoClass: break;

2786

2788 ++neededInt;

2789

2790 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

2791

2792 if (Lo == NoClass)

2794 break;

2795

2796

2797

2798 case X87Up:

2799 case SSE:

2800 ++neededSSE;

2801 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

2802

2803 if (Lo == NoClass)

2805 break;

2806

2807

2808

2809

2810 case SSEUp:

2811 assert(Lo == SSE && "Unexpected SSEUp classification");

2812 ResType = GetByteVectorType(Ty);

2813 break;

2814 }

2815

2816

2817

2818

2819 if (HighPart)

2821

2823}

2824

2826X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,

2827 unsigned &NeededSSE,

2828 unsigned &MaxVectorWidth) const {

2830 assert(RT && "classifyRegCallStructType only valid with struct types");

2831

2833 return getIndirectReturnResult(Ty);

2834

2835

2836 if (auto CXXRD = dyn_cast(RT->getDecl())) {

2837 if (CXXRD->isDynamicClass()) {

2838 NeededInt = NeededSSE = 0;

2839 return getIndirectReturnResult(Ty);

2840 }

2841

2842 for (const auto &I : CXXRD->bases())

2843 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE,

2844 MaxVectorWidth)

2845 .isIndirect()) {

2846 NeededInt = NeededSSE = 0;

2847 return getIndirectReturnResult(Ty);

2848 }

2849 }

2850

2851

2852 for (const auto *FD : RT->getDecl()->fields()) {

2853 QualType MTy = FD->getType();

2855 if (classifyRegCallStructTypeImpl(MTy, NeededInt, NeededSSE,

2856 MaxVectorWidth)

2857 .isIndirect()) {

2858 NeededInt = NeededSSE = 0;

2859 return getIndirectReturnResult(Ty);

2860 }

2861 } else {

2862 unsigned LocalNeededInt, LocalNeededSSE;

2864 true, true)

2865 .isIndirect()) {

2866 NeededInt = NeededSSE = 0;

2867 return getIndirectReturnResult(Ty);

2868 }

2869 if (const auto *AT = getContext().getAsConstantArrayType(MTy))

2870 MTy = AT->getElementType();

2872 if (getContext().getTypeSize(VT) > MaxVectorWidth)

2873 MaxVectorWidth = getContext().getTypeSize(VT);

2874 NeededInt += LocalNeededInt;

2875 NeededSSE += LocalNeededSSE;

2876 }

2877 }

2878

2880}

2881

2883X86_64ABIInfo::classifyRegCallStructType(QualType Ty, unsigned &NeededInt,

2884 unsigned &NeededSSE,

2885 unsigned &MaxVectorWidth) const {

2886

2887 NeededInt = 0;

2888 NeededSSE = 0;

2889 MaxVectorWidth = 0;

2890

2891 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE,

2892 MaxVectorWidth);

2893}

2894

2895void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {

2896

2898

2899

2900

2901 if (CallingConv == llvm::CallingConv::Win64) {

2902 WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);

2903 Win64ABIInfo.computeInfo(FI);

2904 return;

2905 }

2906

2907 bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;

2908

2909

2910 unsigned FreeIntRegs = IsRegCall ? 11 : 6;

2911 unsigned FreeSSERegs = IsRegCall ? 16 : 8;

2912 unsigned NeededInt = 0, NeededSSE = 0, MaxVectorWidth = 0;

2913

2918 FI.getReturnType(), NeededInt, NeededSSE, MaxVectorWidth);

2919 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {

2920 FreeIntRegs -= NeededInt;

2921 FreeSSERegs -= NeededSSE;

2922 } else {

2924 }

2926 getContext().getCanonicalType(FI.getReturnType()

2929 getContext().LongDoubleTy)

2930

2931

2933 else

2935 }

2936

2937

2938

2940 --FreeIntRegs;

2941 else if (NeededSSE && MaxVectorWidth > 0)

2943

2944

2946 ++FreeIntRegs;

2947

2949

2950

2951 unsigned ArgNo = 0;

2953 it != ie; ++it, ++ArgNo) {

2954 bool IsNamedArg = ArgNo < NumRequiredArgs;

2955

2956 if (IsRegCall && it->type->isStructureOrClassType())

2957 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE,

2958 MaxVectorWidth);

2959 else

2961 NeededSSE, IsNamedArg);

2962

2963

2964

2965

2966

2967 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {

2968 FreeIntRegs -= NeededInt;

2969 FreeSSERegs -= NeededSSE;

2972 } else {

2973 it->info = getIndirectResult(it->type, FreeIntRegs);

2974 }

2975 }

2976}

2977

2980 Address overflow_arg_area_p =

2982 llvm::Value *overflow_arg_area =

2983 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

2984

2985

2986

2987

2988

2992 Align);

2993 }

2994

2995

2997 llvm::Value *Res = overflow_arg_area;

2998

2999

3000

3001

3002

3003

3005 llvm::Value *Offset =

3006 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);

3008 Offset, "overflow_arg_area.next");

3010

3011

3012 return Address(Res, LTy, Align);

3013}

3014
3015RValue X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3016                                QualType Ty, AggValueSlot Slot) const {
3017  // Assume that va_list type is correct; should be pointer to LLVM type:
3018  // struct {
3019  //   i32 gp_offset;
3020  //   i32 fp_offset;
3021  //   i8* overflow_arg_area;
3022  //   i8* reg_save_area;
3023  // };
3024  unsigned neededInt, neededSSE;
3025
3026  Ty = getContext().getCanonicalType(Ty);
3027  ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
3028                                       /*isNamedArg*/false);
3029
3030  // Empty records are ignored for parameter passing purposes.
3031  if (AI.isIgnore())
3032    return Slot.asRValue();
3033
3034  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
3035  // in the registers. If not go to step 7.
3036  if (!neededInt && !neededSSE)
3037    return CGF.EmitLoadOfAnyValue(
3038        CGF.MakeAddrLValue(EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty), Ty),
3039        Slot);
3040
3041  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
3042  // general purpose registers needed to pass type and num_fp to hold
3043  // the number of floating point registers needed.
3044
3045  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
3046  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
3047  // l->fp_offset > 304 - num_fp * 16 go to step 7.
3048  //
3049  // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
3050  // register save space).
3051
3052  llvm::Value *InRegs = nullptr;
3053  Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
3054  llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
3055  if (neededInt) {
3056    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
3057    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
3058    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
3059    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
3060  }
3061
3062  if (neededSSE) {
3063    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
3064    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
3065    llvm::Value *FitsInFP =
3066        llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
3067    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
3068    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3069  }
3070
3071  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
3072  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
3073  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
3074  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3075
3076  // Emit code to load the value if it was passed in registers.
3077
3078  CGF.EmitBlock(InRegBlock);
3079
3080  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
3081  // an offset of l->gp_offset and/or l->fp_offset. This may require
3082  // copying to a temporary location in case the parameter is passed
3083  // in different register classes or requires an alignment greater
3084  // than 8 for general purpose registers and 16 for XMM registers.
3085  //
3086  // FIXME: This really results in shameful code when we end up needing to
3087  // collect arguments from different places; often what should result in a
3088  // simple assembling of a structure from scattered addresses has many more
3089  // loads than necessary. Can we clean this up?
3090  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3091  llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
3092      CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area");
3093
3094  Address RegAddr = Address::invalid();
3095  if (neededInt && neededSSE) {
3096    // FIXME: Cleanup.
3097    assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
3098    llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
3099    Address Tmp = CGF.CreateMemTemp(Ty);
3100    Tmp = Tmp.withElementType(ST);
3101    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
3102    llvm::Type *TyLo = ST->getElementType(0);
3103    llvm::Type *TyHi = ST->getElementType(1);
3104    assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
3105           "Unexpected ABI info for mixed regs");
3106    llvm::Value *GPAddr =
3107        CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset);
3108    llvm::Value *FPAddr =
3109        CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset);
3110    llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
3111    llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
3112
3113    // Copy the first element.
3114    // FIXME: Our choice of alignment here and below is probably pessimistic.
3115    llvm::Value *V = CGF.Builder.CreateAlignedLoad(
3116        TyLo, RegLoAddr,
3117        CharUnits::fromQuantity(getDataLayout().getABITypeAlign(TyLo)));
3118    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
3119
3120    // Copy the second element.
3121    V = CGF.Builder.CreateAlignedLoad(
3122        TyHi, RegHiAddr,
3123        CharUnits::fromQuantity(getDataLayout().getABITypeAlign(TyHi)));
3124    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
3125
3126    RegAddr = Tmp.withElementType(LTy);
3127  } else if (neededInt || neededSSE == 1) {
3128    // Copy to a temporary if necessary to ensure the appropriate alignment.
3129    auto TInfo = getContext().getTypeInfoInChars(Ty);
3130    uint64_t TySize = TInfo.Width.getQuantity();
3131    CharUnits TyAlign = TInfo.Align;
3132    llvm::Type *CoTy = nullptr;
3133    if (AI.isDirect())
3134      CoTy = AI.getCoerceToType();
3135
3136    llvm::Value *GpOrFpOffset = neededInt ? gp_offset : fp_offset;
3137    uint64_t Alignment = neededInt ? 8 : 16;
3138    uint64_t RegSize = neededInt ? neededInt * 8 : 16;
3139    // There are two cases require special handling:
3140    // 1)
3141    //    ```
3142    //    struct {
3143    //      struct {} a[8];
3144    //      int b;
3145    //    };
3146    //    ```
3147    //    The lower 8 bytes of the structure are not stored,
3148    //    so an 8-byte offset is needed when accessing the structure.
3149    // 2)
3150    //   ```
3151    //   struct {
3152    //     long long a;
3153    //     struct {} b;
3154    //   };
3155    //   ```
3156    //   The stored size of this structure is smaller than its actual size,
3157    //   which may lead to reading past the end of the register save area.
3158    if (CoTy && (AI.getDirectOffset() == 8 || RegSize < TySize)) {
3159      Address Tmp = CGF.CreateMemTemp(Ty);
3160      llvm::Value *Addr =
3161          CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, GpOrFpOffset);
3162      llvm::Value *Src = CGF.Builder.CreateAlignedLoad(CoTy, Addr, TyAlign);
3163      llvm::Value *PtrOffset =
3164          llvm::ConstantInt::get(CGF.Int32Ty, AI.getDirectOffset());
3165      Address Dst = Address(
3166          CGF.Builder.CreateGEP(CGF.Int8Ty, Tmp.getBasePointer(), PtrOffset),
3167          LTy, TyAlign);
3168      CGF.Builder.CreateStore(Src, Dst);
3169      RegAddr = Tmp;
3170    } else {
3171      RegAddr =
3172          Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, GpOrFpOffset),
3173                  LTy, CharUnits::fromQuantity(Alignment));
3174
3175      // Copy into a temporary if the type is more aligned than the
3176      // register save area.
3177      if (neededInt && TyAlign.getQuantity() > 8) {
3178        Address Tmp = CGF.CreateMemTemp(Ty);
3179        CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
3180        RegAddr = Tmp;
3181      }
3182    }
3183
3184  } else {
3185    assert(neededSSE == 2 && "Invalid number of needed registers!");
3186    // SSE registers are spaced 16 bytes apart in the register save
3187    // area, we need to collect the two eightbytes together.
3188    // The ABI isn't explicit about this, but it seems reasonable
3189    // to assume that the slots are 16-byte aligned, since the stack is
3190    // naturally 16-byte aligned and the prologue is expected to store
3191    // all the SSE registers to the RSA.
3192    Address RegAddrLo = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea,
3193                                                      fp_offset),
3194                                CGF.Int8Ty, CharUnits::fromQuantity(16));
3195    Address RegAddrHi =
3196        CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
3197                                               CharUnits::fromQuantity(16));
3198    llvm::Type *ST = AI.canHaveCoerceToType()
3199                         ? AI.getCoerceToType()
3200                         : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy);
3201    llvm::Value *V;
3202    Address Tmp = CGF.CreateMemTemp(Ty);
3203    Tmp = Tmp.withElementType(ST);
3204    V = CGF.Builder.CreateLoad(
3205        RegAddrLo.withElementType(ST->getStructElementType(0)));
3206    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
3207    V = CGF.Builder.CreateLoad(
3208        RegAddrHi.withElementType(ST->getStructElementType(1)));
3209    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
3210
3211    RegAddr = Tmp.withElementType(LTy);
3212  }
3213
3214  // AMD64-ABI 3.5.7p5: Step 5. Set:
3215  // l->gp_offset = l->gp_offset + num_gp * 8
3216  // l->fp_offset = l->fp_offset + num_fp * 16.
3217  if (neededInt) {
3218    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
3219    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
3220                            gp_offset_p);
3221  }
3222  if (neededSSE) {
3223    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
3224    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
3225                            fp_offset_p);
3226  }
3227  CGF.EmitBranch(ContBlock);
3228
3229  // Emit code to load the value if it was passed in memory.
3230
3231  CGF.EmitBlock(InMemBlock);
3232  Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3233
3234  // Return the appropriate result.
3235
3236  CGF.EmitBlock(ContBlock);
3237  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
3238                                 "vaarg.addr");
3239  return CGF.EmitLoadOfAnyValue(CGF.MakeAddrLValue(ResAddr, Ty), Slot);
3240}
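To make the control flow above easier to follow, here is a hedged C++ sketch of the runtime behavior the emitted IR implements for a single int argument. The struct and function names are invented, and real va_list handling is of course done by the compiler, not by user code like this.

    // SysV x86-64 va_list layout, mirroring the comment at the top of EmitVAArg.
    struct sysv_va_list {
      unsigned gp_offset;      // byte offset of the next unused GP register slot
      unsigned fp_offset;      // byte offset of the next unused XMM register slot
      void *overflow_arg_area; // next stack-passed argument
      void *reg_save_area;     // rdi..r9 (48 bytes), then xmm0..xmm7 (16 bytes each)
    };

    int next_int(sysv_va_list &ap) {
      if (ap.gp_offset <= 48 - 8) {                      // "fits_in_gp"
        int v = *reinterpret_cast<int *>(
            static_cast<char *>(ap.reg_save_area) + ap.gp_offset);
        ap.gp_offset += 8;                               // Step 5: one eightbyte consumed
        return v;                                        // vaarg.in_reg path
      }
      int v = *static_cast<int *>(ap.overflow_arg_area); // vaarg.in_mem path
      ap.overflow_arg_area = static_cast<char *>(ap.overflow_arg_area) + 8;
      return v;
    }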
3241
3242RValue X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
3243                                  QualType Ty, AggValueSlot Slot) const {
3244  // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
3245  // not 1, 2, 4, or 8 bytes, must be passed by reference."
3246  uint64_t Width = getContext().getTypeSize(Ty);
3247  bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
3248
3249  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
3250                          CGF.getContext().getTypeInfoInChars(Ty),
3251                          CharUnits::fromQuantity(8),
3252                          /*allowHigherAlign*/ false, Slot);
3253}
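Illustrative examples of the MS rule quoted above (not part of the file): only sizes 1, 2, 4, and 8 go directly, everything else by reference.

    struct S3  { char c[3];   }; // 3 bytes, not a power of two -> by reference
    struct S8  { double d;    }; // 8 bytes                     -> direct
    struct S16 { double d[2]; }; // wider than 8 bytes          -> by reference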
3254
3255ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall(
3256    QualType Ty, unsigned &FreeSSERegs, const ABIArgInfo &current) const {
3257  const Type *Base = nullptr;
3258  uint64_t NumElts = 0;
3259
3260  if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
3261      isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
3262    FreeSSERegs -= NumElts;
3263    return getDirectX86Hva();
3264  }
3265  return current;
3266}
3267
3268ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
3269                                      bool IsReturnType, bool IsVectorCall,
3270                                      bool IsRegCall) const {
3271
3272  if (Ty->isVoidType())
3273    return ABIArgInfo::getIgnore();
3274
3275  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3276    Ty = EnumTy->getDecl()->getIntegerType();
3277
3278  TypeInfo Info = getContext().getTypeInfo(Ty);
3279  uint64_t Width = Info.Width;
3280  CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
3281
3282  const RecordType *RT = Ty->getAs<RecordType>();
3283  if (RT) {
3284    if (!IsReturnType) {
3285      if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
3286        return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
3287    }
3288
3289    if (RT->getDecl()->hasFlexibleArrayMember())
3290      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3291
3292  }
3293
3294  const Type *Base = nullptr;
3295  uint64_t NumElts = 0;
3296  // vectorcall adds the concept of a homogenous vector aggregate, similar to
3297  // other targets.
3298  if ((IsVectorCall || IsRegCall) &&
3299      isHomogeneousAggregate(Ty, Base, NumElts)) {
3300    if (IsRegCall) {
3301      if (FreeSSERegs >= NumElts) {
3302        FreeSSERegs -= NumElts;
3303        if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
3304          return ABIArgInfo::getDirect();
3305        return ABIArgInfo::getExpand();
3306      }
3307      return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3308    } else if (IsVectorCall) {
3309      if (FreeSSERegs >= NumElts &&
3310          (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
3311        FreeSSERegs -= NumElts;
3312        return ABIArgInfo::getDirect();
3313      } else if (IsReturnType) {
3314        return ABIArgInfo::getExpand();
3315      } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
3316        // HVAs are delayed and reclassified in the 2nd step.
3317        return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3318      }
3319    }
3320  }
3321
3322  if (Ty->isMemberPointerType()) {
3323    // If the member pointer is represented by an LLVM int or ptr, pass it
3324    // directly.
3325    llvm::Type *LLTy = CGT.ConvertType(Ty);
3326    if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3327      return ABIArgInfo::getDirect();
3328  }
3329
3330  if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
3331    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
3332    // not 1, 2, 4, or 8 bytes, must be passed by reference."
3333    if (Width > 64 || !llvm::isPowerOf2_64(Width))
3334      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3335
3336    // Otherwise, coerce it to a small integer.
3337    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
3338  }
3339
3340  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
3341    switch (BT->getKind()) {
3342    case BuiltinType::Bool:
3343      // Bool type is always extended to the ABI, other builtin types are not
3344      // extended.
3345      return ABIArgInfo::getExtend(Ty);
3346
3347    case BuiltinType::LongDouble:
3348      // Mingw64 GCC uses the old 80 bit extended precision floating point
3349      // unit. It passes them indirectly through memory.
3350      if (IsMingw64) {
3351        const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
3352        if (LDF == &llvm::APFloat::x87DoubleExtended())
3353          return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3354      }
3355      break;
3356
3357    case BuiltinType::Int128:
3358    case BuiltinType::UInt128:
3359      // If it's a parameter type, the normal ABI rule is that arguments larger
3360      // than 8 bytes are passed indirectly. GCC follows it. We follow it too,
3361      // even though it isn't particularly efficient.
3362      if (!IsReturnType)
3363        return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3364
3365      // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that.
3366      // Clang matches them for compatibility.
3367      return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
3368          llvm::Type::getInt64Ty(getVMContext()), 2));
3369
3370    default:
3371      break;
3372    }
3373  }
3374
3375  if (Ty->isBitIntType()) {
3376    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
3377    // not 1, 2, 4, or 8 bytes, must be passed by reference."
3378    // However, non-power-of-two bit-precise integers will be passed as 1, 2, 4,
3379    // or 8 bytes anyway as long is it fits in them, so we don't have to check
3380    // the power of 2.
3381    if (Width <= 64)
3382      return ABIArgInfo::getDirect();
3383    return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3384  }
3385
3386  return ABIArgInfo::getDirect();
3387}
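A few concrete types and where they fall through classify() above (an illustrative summary, not part of the file):

    void f(bool b);         // Bool           -> ABIArgInfo::getExtend(Ty)
    void g(__int128 v);     // Int128 param   -> indirect (wider than 8 bytes)
    __int128 h();           // Int128 return  -> direct as <2 x i64> (Mingw64 match)
    struct P { int a, b; }; // 8-byte record  -> coerced to a single i64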
3388
3389void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3390  const unsigned CC = FI.getCallingConvention();
3391  bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
3392  bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;
3393
3394  // If __attribute__((sysv_abi)) is in use, use the SysV argument
3395  // classification rules.
3396  if (CC == llvm::CallingConv::X86_64_SysV) {
3397    X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
3398    SysVABIInfo.computeInfo(FI);
3399    return;
3400  }
3401
3402  unsigned FreeSSERegs = 0;
3403  if (IsVectorCall) {
3404    // We can use up to 4 SSE return registers with vectorcall.
3405    FreeSSERegs = 4;
3406  } else if (IsRegCall) {
3407    // RegCall gives us 16 SSE registers.
3408    FreeSSERegs = 16;
3409  }
3410
3411  if (!getCXXABI().classifyReturnType(FI))
3412    FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true,
3413                                  IsVectorCall, IsRegCall);
3414
3415  if (IsVectorCall) {
3416    // We can use up to 6 SSE register parameters with vectorcall.
3417    FreeSSERegs = 6;
3418  } else if (IsRegCall) {
3419    // RegCall gives us 16 SSE registers, we can reuse the return registers.
3420    FreeSSERegs = 16;
3421  }
3422
3423  unsigned ArgNum = 0;
3424  unsigned ZeroSSERegs = 0;
3425  for (auto &I : FI.arguments()) {
3426    // Vectorcall in x64 only permits the first 6 arguments to be passed as
3427    // XMM/YMM registers. After the sixth argument, pretend no vector
3428    // registers are left.
3429    unsigned *MaybeFreeSSERegs =
3430        (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs;
3431    I.info =
3432        classify(I.type, *MaybeFreeSSERegs, false, IsVectorCall, IsRegCall);
3433    ++ArgNum;
3434  }
3435
3436  if (IsVectorCall) {
3437    // For vectorcall, assign aggregate HVAs to any free vector registers in a
3438    // second pass.
3439    for (auto &I : FI.arguments())
3440      I.info = reclassifyHvaArgForVectorCall(I.type, FreeSSERegs, I.info);
3441  }
3442}
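As an illustration of the two-pass scheme (not from the file): the HVA argument below is left indirect by the first pass ("HVAs are delayed"), then the second pass finds four free XMM registers and upgrades it to a direct, in-register HVA via reclassifyHvaArgForVectorCall.

    struct Hva4 { double x, y, z, w; }; // homogeneous aggregate of 4 doubles
    void __vectorcall takes_hva(Hva4 h);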
3443
3444RValue WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3445                                   QualType Ty, AggValueSlot Slot) const {
3446  // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
3447  // not 1, 2, 4, or 8 bytes, must be passed by reference."
3448  uint64_t Width = getContext().getTypeSize(Ty);
3449  bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
3450
3451  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
3452                          CGF.getContext().getTypeInfoInChars(Ty),
3453                          CharUnits::fromQuantity(8),
3454                          /*allowHigherAlign*/ false, Slot);
3455}
3456
3457std::unique_ptr<TargetCodeGenInfo> CodeGen::createX86_32TargetCodeGenInfo(
3458    CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI,
3459    unsigned NumRegisterParameters, bool SoftFloatABI) {
3460  bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
3461      CGM.getTriple(), CGM.getCodeGenOpts());
3462  return std::make_unique<X86_32TargetCodeGenInfo>(
3463      CGM.getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
3464      NumRegisterParameters, SoftFloatABI);
3465}
3466
3467std::unique_ptr<TargetCodeGenInfo> CodeGen::createWinX86_32TargetCodeGenInfo(
3468    CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI,
3469    unsigned NumRegisterParameters) {
3470  bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
3471      CGM.getTriple(), CGM.getCodeGenOpts());
3472  return std::make_unique<WinX86_32TargetCodeGenInfo>(
3473      CGM.getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
3474      NumRegisterParameters);
3475}
3476
3477std::unique_ptr<TargetCodeGenInfo>
3478CodeGen::createX86_64TargetCodeGenInfo(CodeGenModule &CGM,
3479                                       X86AVXABILevel AVXLevel) {
3480  return std::make_unique<X86_64TargetCodeGenInfo>(CGM.getTypes(), AVXLevel);
3481}
3482
3483std::unique_ptr<TargetCodeGenInfo>
3484CodeGen::createWinX86_64TargetCodeGenInfo(CodeGenModule &CGM,
3485                                          X86AVXABILevel AVXLevel) {
3486  return std::make_unique<WinX86_64TargetCodeGenInfo>(CGM.getTypes(), AVXLevel);
3487}
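A hedged sketch of how a caller might pick between these factories; the real dispatch lives elsewhere in clang (in CodeGenModule), and the helper name here is invented.

    std::unique_ptr<TargetCodeGenInfo>
    makeX86_64Info(CodeGenModule &CGM, X86AVXABILevel AVXLevel) {
      // Hypothetical helper: choose the Win64 or SysV variant by OS.
      return CGM.getTriple().isOSWindows()
                 ? createWinX86_64TargetCodeGenInfo(CGM, AVXLevel)
                 : createX86_64TargetCodeGenInfo(CGM, AVXLevel);
    }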