LLVM: lib/Target/AVR/AVRISelLowering.cpp Source File (original) (raw)

1

2

3

4

5

6

7

8

9

10

11

12

13

15

26

32

33namespace llvm {

34

38

41

42

44

50

53

58

60

65 }

66 }

67

69

75 }

76

77

78

81

82

83

96

101

107

118

120

121

130

132

137

138

147 }

148

149

158

159

166

167

170

171

174

175

176

177 if (Subtarget.supportsMultiplication()) {

180 }

181

185 }

186

191 }

192

195

196

197

198

199 }

200

203}

204

206 EVT VT) const {

207 assert(!VT.isVector() && "No AVR SetCC type for vectors!");

208 return MVT::i8;

209}

210

212 unsigned Opc8;

214 EVT VT = Op.getValueType();

217 "Expected power-of-2 shift amount");

218

221

222

224 }

225 SDVTList ResTys = DAG.getVTList(MVT::i16, MVT::i16);

232 uint64_t ShiftAmount = N->getConstantOperandVal(1);

233 if (ShiftAmount == 16) {

234

235

236

237

238 switch (Op.getOpcode()) {

242 }

246 }

247 }

248 }

250 unsigned Opc;

251 switch (Op.getOpcode()) {

252 default:

255 Opc = AVRISD::LSLW;

256 break;

258 Opc = AVRISD::LSRW;

259 break;

261 Opc = AVRISD::ASRW;

262 break;

263 }

267 }

268

269

271 switch (Op.getOpcode()) {

272 default:

275 return DAG.getNode(AVRISD::LSLLOOP, dl, VT, N->getOperand(0),

276 N->getOperand(1));

278 return DAG.getNode(AVRISD::LSRLOOP, dl, VT, N->getOperand(0),

279 N->getOperand(1));

281 SDValue Amt = N->getOperand(1);

282 EVT AmtVT = Amt.getValueType();

285 return DAG.getNode(AVRISD::ROLLOOP, dl, VT, N->getOperand(0), Amt);

286 }

288 SDValue Amt = N->getOperand(1);

289 EVT AmtVT = Amt.getValueType();

292 return DAG.getNode(AVRISD::RORLOOP, dl, VT, N->getOperand(0), Amt);

293 }

295 return DAG.getNode(AVRISD::ASRLOOP, dl, VT, N->getOperand(0),

296 N->getOperand(1));

297 }

298 }

299

300 uint64_t ShiftAmount = N->getConstantOperandVal(1);

301 SDValue Victim = N->getOperand(0);

302

303 switch (Op.getOpcode()) {

305 Opc8 = AVRISD::ASR;

306 break;

308 Opc8 = AVRISD::ROL;

310 break;

312 Opc8 = AVRISD::ROR;

314 break;

316 Opc8 = AVRISD::LSR;

317 break;

319 Opc8 = AVRISD::LSL;

320 break;

321 default:

323 }

324

325

327 if (Op.getOpcode() == ISD::SHL && 4 <= ShiftAmount && ShiftAmount < 7) {

328

329 Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);

330 Victim =

332 ShiftAmount -= 4;

333 } else if (Op.getOpcode() == ISD::SRL && 4 <= ShiftAmount &&

334 ShiftAmount < 7) {

335

336 Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);

337 Victim =

339 ShiftAmount -= 4;

340 } else if (Op.getOpcode() == ISD::SHL && ShiftAmount == 7) {

341

342 Victim = DAG.getNode(AVRISD::LSLBN, dl, VT, Victim,

344 ShiftAmount = 0;

345 } else if (Op.getOpcode() == ISD::SRL && ShiftAmount == 7) {

346

347 Victim = DAG.getNode(AVRISD::LSRBN, dl, VT, Victim,

349 ShiftAmount = 0;

350 } else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 6) {

351

352 Victim = DAG.getNode(AVRISD::ASRBN, dl, VT, Victim,

354 ShiftAmount = 0;

355 } else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 7) {

356

357 Victim = DAG.getNode(AVRISD::ASRBN, dl, VT, Victim,

359 ShiftAmount = 0;

360 } else if (Op.getOpcode() == ISD::ROTL && ShiftAmount == 3) {

361

362 Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);

363 Victim = DAG.getNode(AVRISD::ROR, dl, VT, Victim);

364 ShiftAmount = 0;

365 } else if (Op.getOpcode() == ISD::ROTR && ShiftAmount == 3) {

366

367 Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);

368 Victim = DAG.getNode(AVRISD::ROL, dl, VT, Victim);

369 ShiftAmount = 0;

370 } else if (Op.getOpcode() == ISD::ROTL && ShiftAmount == 7) {

371

372 Victim = DAG.getNode(AVRISD::ROR, dl, VT, Victim);

373 ShiftAmount = 0;

374 } else if (Op.getOpcode() == ISD::ROTR && ShiftAmount == 7) {

375

376 Victim = DAG.getNode(AVRISD::ROL, dl, VT, Victim);

377 ShiftAmount = 0;

379 ShiftAmount >= 4) {

380

381 Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);

382 ShiftAmount -= 4;

383 }

386

387 switch (ShiftAmount) {

388 case 15:

389 Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,

391 ShiftAmount = 0;

392 break;

393 case 14:

394 Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,

396 ShiftAmount = 0;

397 break;

398 case 7:

399 Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,

401 ShiftAmount = 0;

402 break;

403 default:

404 break;

405 }

406 if (4 <= ShiftAmount && ShiftAmount < 8)

407 switch (Op.getOpcode()) {

409 Victim = DAG.getNode(AVRISD::LSLWN, dl, VT, Victim,

411 ShiftAmount -= 4;

412 break;

414 Victim = DAG.getNode(AVRISD::LSRWN, dl, VT, Victim,

416 ShiftAmount -= 4;

417 break;

418 default:

419 break;

420 }

421 else if (8 <= ShiftAmount && ShiftAmount < 12)

422 switch (Op.getOpcode()) {

424 Victim = DAG.getNode(AVRISD::LSLWN, dl, VT, Victim,

426 ShiftAmount -= 8;

427

428 Opc8 = AVRISD::LSLHI;

429 break;

431 Victim = DAG.getNode(AVRISD::LSRWN, dl, VT, Victim,

433 ShiftAmount -= 8;

434

435 Opc8 = AVRISD::LSRLO;

436 break;

438 Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,

440 ShiftAmount -= 8;

441

442 Opc8 = AVRISD::ASRLO;

443 break;

444 default:

445 break;

446 }

447 else if (12 <= ShiftAmount)

448 switch (Op.getOpcode()) {

450 Victim = DAG.getNode(AVRISD::LSLWN, dl, VT, Victim,

452 ShiftAmount -= 12;

453

454 Opc8 = AVRISD::LSLHI;

455 break;

457 Victim = DAG.getNode(AVRISD::LSRWN, dl, VT, Victim,

459 ShiftAmount -= 12;

460

461 Opc8 = AVRISD::LSRLO;

462 break;

464 Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,

466 ShiftAmount -= 8;

467

468 Opc8 = AVRISD::ASRLO;

469 break;

470 default:

471 break;

472 }

473 }

474

475 while (ShiftAmount--) {

476 Victim = DAG.getNode(Opc8, dl, VT, Victim);

477 }

478

479 return Victim;

480}

481

483 unsigned Opcode = Op->getOpcode();

485 "Invalid opcode for Div/Rem lowering");

487 EVT VT = Op->getValueType(0);

488 Type *Ty = VT.getTypeForEVT(*DAG.getContext());

489

490 RTLIB::Libcall LC;

491 switch (VT.getSimpleVT().SimpleTy) {

492 default:

494 case MVT::i8:

495 LC = IsSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8;

496 break;

497 case MVT::i16:

498 LC = IsSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16;

499 break;

500 case MVT::i32:

501 LC = IsSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;

502 break;

503 }

504

505 SDValue InChain = DAG.getEntryNode();

506

509 TargetLowering::ArgListEntry Entry(

510 Value, Value.getValueType().getTypeForEVT(*DAG.getContext()));

511 Entry.IsSExt = IsSigned;

512 Entry.IsZExt = !IsSigned;

513 Args.push_back(Entry);

514 }

515

518

520

521 SDLoc dl(Op);

522 TargetLowering::CallLoweringInfo CLI(DAG);

523 CLI.setDebugLoc(dl)

524 .setChain(InChain)

526 .setInRegister()

527 .setSExtResult(IsSigned)

528 .setZExtResult(!IsSigned);

529

530 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

531 return CallInfo.first;

532}

533

536 auto DL = DAG.getDataLayout();

537

540

541

544 return DAG.getNode(AVRISD::WRAPPER, SDLoc(Op), getPointerTy(DL), Result);

545}

546

549 auto DL = DAG.getDataLayout();

551

553

554 return DAG.getNode(AVRISD::WRAPPER, SDLoc(Op), getPointerTy(DL), Result);

555}

556

557

559 switch (CC) {

560 default:

574 }

575}

576

577

579 SelectionDAG &DAG, SDLoc DL) const {

580 assert((LHS.getSimpleValueType() == RHS.getSimpleValueType()) &&

581 "LHS and RHS have different types");

582 assert(((LHS.getSimpleValueType() == MVT::i16) ||

583 (LHS.getSimpleValueType() == MVT::i8)) &&

584 "invalid comparison type");

585

587

590

591

593 DAG.getIntPtrConstant(0, DL));

595 DAG.getIntPtrConstant(1, DL));

596 SDValue RHSlo = (Imm & 0xff) == 0

599 DAG.getIntPtrConstant(0, DL));

600 SDValue RHShi = (Imm & 0xff00) == 0

603 DAG.getIntPtrConstant(1, DL));

604 Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHSlo, RHSlo);

605 Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);

607

608

609 uint64_t Imm = LHS->getAsZExtVal();

611 ? DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8)

613 DAG.getIntPtrConstant(0, DL));

615 ? DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8)

617 DAG.getIntPtrConstant(1, DL));

619 DAG.getIntPtrConstant(0, DL));

621 DAG.getIntPtrConstant(1, DL));

622 Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHSlo, RHSlo);

623 Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);

624 } else {

625

626 Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHS, RHS);

627 }

628

629 return Cmp;

630}

631

632

633

638 EVT VT = LHS.getValueType();

639 bool UseTest = false;

640

641 switch (CC) {

642 default:

643 break;

645

648 break;

649 }

652 switch (C->getSExtValue()) {

653 case -1: {

654

655

656 UseTest = true;

658 break;

659 }

660 case 0: {

661

662

664 LHS = DAG.getConstant(0, DL, VT);

666 break;

667 }

668 default: {

669

670

671 RHS = DAG.getSignedConstant(C->getSExtValue() + 1, DL, VT);

673 break;

674 }

675 }

676 break;

677 }

678

681 break;

682 }

685 switch (C->getSExtValue()) {

686 case 1: {

687

688

690 LHS = DAG.getConstant(0, DL, VT);

692 break;

693 }

694 case 0: {

695

696

697 UseTest = true;

699 break;

700 }

701 }

702 }

703 break;

704 }

706

709 break;

710 }

712

713

715

716

717 assert((C->isAllOnes()) && "integer overflow in comparison transform");

718 RHS = DAG.getConstant(C->getZExtValue() + 1, DL, VT);

720 break;

721 }

722

725 break;

726 }

727 }

728

729

730

731 if (VT == MVT::i32) {

733 DAG.getIntPtrConstant(0, DL));

735 DAG.getIntPtrConstant(1, DL));

737 DAG.getIntPtrConstant(0, DL));

739 DAG.getIntPtrConstant(1, DL));

740

741 if (UseTest) {

742

744 DAG.getIntPtrConstant(1, DL));

745 Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top);

746 } else {

747 Cmp = getAVRCmp(LHSlo, RHSlo, DAG, DL);

748 Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);

749 }

750 } else if (VT == MVT::i64) {

752 DAG.getIntPtrConstant(0, DL));

754 DAG.getIntPtrConstant(1, DL));

755

757 DAG.getIntPtrConstant(0, DL));

759 DAG.getIntPtrConstant(1, DL));

761 DAG.getIntPtrConstant(0, DL));

763 DAG.getIntPtrConstant(1, DL));

764

766 DAG.getIntPtrConstant(0, DL));

768 DAG.getIntPtrConstant(1, DL));

769

771 DAG.getIntPtrConstant(0, DL));

773 DAG.getIntPtrConstant(1, DL));

775 DAG.getIntPtrConstant(0, DL));

777 DAG.getIntPtrConstant(1, DL));

778

779 if (UseTest) {

780

782 DAG.getIntPtrConstant(1, DL));

783 Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top);

784 } else {

785 Cmp = getAVRCmp(LHS0, RHS0, DAG, DL);

786 Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS1, RHS1, Cmp);

787 Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS2, RHS2, Cmp);

788 Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS3, RHS3, Cmp);

789 }

790 } else if (VT == MVT::i8 || VT == MVT::i16) {

791 if (UseTest) {

792

793 Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue,

794 (VT == MVT::i8)

797 LHS, DAG.getIntPtrConstant(1, DL)));

798 } else {

800 }

801 } else {

803 }

804

805

806 if (!UseTest) {

807 AVRcc = DAG.getConstant(intCCToAVRCC(CC), DL, MVT::i8);

808 }

809

810 return Cmp;

811}

812

814 SDValue Chain = Op.getOperand(0);

819 SDLoc dl(Op);

820

823

824 return DAG.getNode(AVRISD::BRCOND, dl, MVT::Other, Chain, Dest, TargetCC,

825 Cmp);

826}

827

831 SDValue TrueV = Op.getOperand(2);

832 SDValue FalseV = Op.getOperand(3);

834 SDLoc dl(Op);

835

838

840

841 return DAG.getNode(AVRISD::SELECT_CC, dl, Op.getValueType(), Ops);

842}

843

848 SDLoc DL(Op);

849

852

853 SDValue TrueV = DAG.getConstant(1, DL, Op.getValueType());

854 SDValue FalseV = DAG.getConstant(0, DL, Op.getValueType());

856

857 return DAG.getNode(AVRISD::SELECT_CC, DL, Op.getValueType(), Ops);

858}

859

861 const MachineFunction &MF = DAG.getMachineFunction();

862 const AVRMachineFunctionInfo *AFI = MF.getInfo();

864 auto DL = DAG.getDataLayout();

865 SDLoc dl(Op);

866

867

868

870

871 return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1),

872 MachinePointerInfo(SV));

873}

874

875

877 SDValue ZeroReg = DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8);

880

881

882 return Op;

883 }

884

885

886

887

888

889

890

891 SDLoc dl(Op);

893 SDNode *N = Op.getNode();

895 for (unsigned I = 0; I < N->getNumOperands(); I++) {

896 SDValue Operand = N->getOperand(I);

897 if (Operand.getValueType() == MVT::Glue) {

898

899

900 Glue = Operand;

901 } else {

902 Ops.push_back(Operand);

903 }

904 }

906 Ops.push_back(DAG.getTargetConstant(Flags, dl, MVT::i32));

907 Ops.push_back(ZeroReg);

908 if (Glue) {

909 Ops.push_back(Glue);

910 }

911

912

913

914 SDValue New = DAG.getNode(N->getOpcode(), dl, N->getVTList(), Ops);

915 DAG.ReplaceAllUsesOfValueWith(Op, New);

916 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), New.getValue(1));

917

918 return New;

919}

920

922 switch (Op.getOpcode()) {

923 default:

930 return LowerShifts(Op, DAG);

932 return LowerGlobalAddress(Op, DAG);

934 return LowerBlockAddress(Op, DAG);

935 case ISD::BR_CC:

936 return LowerBR_CC(Op, DAG);

938 return LowerSELECT_CC(Op, DAG);

940 return LowerSETCC(Op, DAG);

941 case ISD::VASTART:

942 return LowerVASTART(Op, DAG);

945 return LowerDivRem(Op, DAG);

946 case ISD::INLINEASM:

947 return LowerINLINEASM(Op, DAG);

948 }

949

951}

952

953

954

959

960 switch (N->getOpcode()) {

962

965 ISD::SUB, DL, N->getValueType(0), N->getOperand(0),

966 DAG.getConstant(-C->getAPIntValue(), DL, C->getValueType(0)));

968 }

969 break;

970 }

971 default: {

973

976

977 break;

978 }

979 }

980}

981

982

983

986 unsigned AS,

989

990

992 return true;

993 }

994

995

997 return false;

998 }

999

1000

1001 if (Offs < 0)

1002 Offs = -Offs;

1005 return true;

1006 }

1007

1008 return false;

1009}

1010

1011

1012

1013

1021

1023 VT = LD->getMemoryVT();

1024 Op = LD->getBasePtr().getNode();

1026 return false;

1028 return false;

1029 }

1031 VT = ST->getMemoryVT();

1032 Op = ST->getBasePtr().getNode();

1034 return false;

1035 }

1036 } else {

1037 return false;

1038 }

1039

1040 if (VT != MVT::i8 && VT != MVT::i16) {

1041 return false;

1042 }

1043

1045 return false;

1046 }

1047

1049 int RHSC = RHS->getSExtValue();

1051 RHSC = -RHSC;

1052

1053 if ((VT == MVT::i16 && RHSC != -2) || (VT == MVT::i8 && RHSC != -1)) {

1054 return false;

1055 }

1056

1057 Base = Op->getOperand(0);

1060

1061 return true;

1062 }

1063

1064 return false;

1065}

1066

1067

1068

1069

1078

1080 VT = LD->getMemoryVT();

1081 Ptr = LD->getBasePtr();

1083 return false;

1085 VT = ST->getMemoryVT();

1086 Ptr = ST->getBasePtr();

1087

1089 return false;

1090

1091

1092

1093

1094 if (VT == MVT::i16 && Subtarget.hasLowByteFirst())

1095 return false;

1096 } else {

1097 return false;

1098 }

1099

1100 if (VT != MVT::i8 && VT != MVT::i16) {

1101 return false;

1102 }

1103

1105 return false;

1106 }

1107

1109 int RHSC = RHS->getSExtValue();

1111 RHSC = -RHSC;

1112 if ((VT == MVT::i16 && RHSC != 2) || (VT == MVT::i8 && RHSC != 1)) {

1113 return false;

1114 }

1115

1116

1117

1120 return false;

1121

1122 Base = Op->getOperand(0);

1123

1124

1125

1126 if (Ptr != Base)

1127 return false;

1128

1131

1132 return true;

1133 }

1134

1135 return false;

1136}

1137

1142

1143

1144

1145

1146

1147#include "AVRGenCallingConv.inc"

1148

1149

1150

1152 AVR::R25, AVR::R24, AVR::R23, AVR::R22, AVR::R21, AVR::R20,

1153 AVR::R19, AVR::R18, AVR::R17, AVR::R16, AVR::R15, AVR::R14,

1154 AVR::R13, AVR::R12, AVR::R11, AVR::R10, AVR::R9, AVR::R8};

1156 AVR::R22, AVR::R21, AVR::R20};

1158 AVR::R26R25, AVR::R25R24, AVR::R24R23, AVR::R23R22, AVR::R22R21,

1159 AVR::R21R20, AVR::R20R19, AVR::R19R18, AVR::R18R17, AVR::R17R16,

1160 AVR::R16R15, AVR::R15R14, AVR::R14R13, AVR::R13R12, AVR::R12R11,

1161 AVR::R11R10, AVR::R10R9, AVR::R9R8};

1163 AVR::R24R23, AVR::R23R22,

1164 AVR::R22R21, AVR::R21R20};

1165

1167 "8-bit and 16-bit register arrays must be of equal length");

1169 "8-bit and 16-bit register arrays must be of equal length");

1170

1171

1172

1173

1174

1175template

1180 CCState &CCInfo, bool Tiny) {

1181

1184 if (Tiny) {

1187 } else {

1190 }

1191

1192 unsigned NumArgs = Args.size();

1193

1194

1195 int RegLastIdx = -1;

1196

1197 bool UseStack = false;

1198 for (unsigned i = 0; i != NumArgs;) {

1199 MVT VT = Args[i].VT;

1200

1201

1202

1203

1204 unsigned ArgIndex = Args[i].OrigArgIndex;

1206 unsigned j = i + 1;

1207 for (; j != NumArgs; ++j) {

1208 if (Args[j].OrigArgIndex != ArgIndex)

1209 break;

1210 TotalBytes += Args[j].VT.getStoreSize();

1211 }

1212

1213 TotalBytes = alignTo(TotalBytes, 2);

1214

1215 if (TotalBytes == 0)

1216 continue;

1217

1218 unsigned RegIdx = RegLastIdx + TotalBytes;

1219 RegLastIdx = RegIdx;

1220

1221 if (RegIdx >= RegList8.size()) {

1222 UseStack = true;

1223 }

1224 for (; i != j; ++i) {

1225 MVT VT = Args[i].VT;

1226

1227 if (UseStack) {

1233 } else {

1234 unsigned Reg;

1235 if (VT == MVT::i8) {

1237 } else if (VT == MVT::i16) {

1239 } else {

1241 "calling convention can only manage i8 and i16 types");

1242 }

1243 assert(Reg && "register not available in calling convention");

1245

1246

1248 }

1249 }

1250 }

1251}

1252

1253

1254template

1255static unsigned

1257 unsigned TotalBytes = 0;

1258

1259 for (const ArgT &Arg : Args) {

1260 TotalBytes += Arg.VT.getStoreSize();

1261 }

1262 return TotalBytes;

1263}

1264

1265

1266

1267

1268template

1270 CCState &CCInfo, bool Tiny) {

1271 unsigned NumArgs = Args.size();

1273

1274 if (Tiny)

1275 assert(TotalBytes <= 4 &&

1276 "return values greater than 4 bytes cannot be lowered on AVRTiny");

1277 else

1278 assert(TotalBytes <= 8 &&

1279 "return values greater than 8 bytes cannot be lowered on AVR");

1280

1281

1284 if (Tiny) {

1287 } else {

1290 }

1291

1292

1293

1294 if (TotalBytes > 4) {

1295 TotalBytes = 8;

1296 } else {

1297 TotalBytes = alignTo(TotalBytes, 2);

1298 }

1299

1300

1301 int RegIdx = TotalBytes - 1;

1302 for (unsigned i = 0; i != NumArgs; ++i) {

1303 MVT VT = Args[i].VT;

1304 unsigned Reg;

1305 if (VT == MVT::i8) {

1307 } else if (VT == MVT::i16) {

1309 } else {

1310 llvm_unreachable("calling convention can only manage i8 and i16 types");

1311 }

1312 assert(Reg && "register not available in calling convention");

1314

1316 }

1317}

1318

1319SDValue AVRTargetLowering::LowerFormalArguments(

1321 const SmallVectorImplISD::InputArg &Ins, const SDLoc &dl,

1322 SelectionDAG &DAG, SmallVectorImpl &InVals) const {

1323 MachineFunction &MF = DAG.getMachineFunction();

1324 MachineFrameInfo &MFI = MF.getFrameInfo();

1325 auto DL = DAG.getDataLayout();

1326

1327

1329 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,

1330 *DAG.getContext());

1331

1332

1333 if (isVarArg) {

1334 CCInfo.AnalyzeFormalArguments(Ins, ArgCC_AVR_Vararg);

1335 } else {

1336 analyzeArguments(nullptr, &MF.getFunction(), &DL, Ins, ArgLocs, CCInfo,

1338 }

1339

1341 for (CCValAssign &VA : ArgLocs) {

1342

1343

1344 if (VA.isRegLoc()) {

1345 EVT RegVT = VA.getLocVT();

1346 const TargetRegisterClass *RC;

1347 if (RegVT == MVT::i8) {

1348 RC = &AVR::GPR8RegClass;

1349 } else if (RegVT == MVT::i16) {

1350 RC = &AVR::DREGSRegClass;

1351 } else {

1353 }

1354

1355 Register Reg = MF.addLiveIn(VA.getLocReg(), RC);

1356 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);

1357

1358

1359

1360

1361

1362

1363

1364 switch (VA.getLocInfo()) {

1365 default:

1368 break;

1370 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);

1371 break;

1373 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,

1374 DAG.getValueType(VA.getValVT()));

1375 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);

1376 break;

1378 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,

1379 DAG.getValueType(VA.getValVT()));

1380 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);

1381 break;

1382 }

1383

1384 InVals.push_back(ArgValue);

1385 } else {

1386

1387 assert(VA.isMemLoc());

1388

1389 EVT LocVT = VA.getLocVT();

1390

1391

1392 int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,

1393 VA.getLocMemOffset(), true);

1394

1395

1396

1398 InVals.push_back(DAG.getLoad(LocVT, dl, Chain, FIN,

1400 }

1401 }

1402

1403

1404

1405 if (isVarArg) {

1406 unsigned StackSize = CCInfo.getStackSize();

1407 AVRMachineFunctionInfo *AFI = MF.getInfo();

1408

1409 AFI->setVarArgsFrameIndex(MFI.CreateFixedObject(2, StackSize, true));

1410 }

1411

1412 return Chain;

1413}

1414

1415

1416

1417

1418

1421 SelectionDAG &DAG = CLI.DAG;

1422 SDLoc &DL = CLI.DL;

1423 SmallVectorImplISD::OutputArg &Outs = CLI.Outs;

1424 SmallVectorImpl &OutVals = CLI.OutVals;

1425 SmallVectorImplISD::InputArg &Ins = CLI.Ins;

1426 SDValue Chain = CLI.Chain;

1428 bool &isTailCall = CLI.IsTailCall;

1430 bool isVarArg = CLI.IsVarArg;

1431

1432 MachineFunction &MF = DAG.getMachineFunction();

1433

1434

1435 isTailCall = false;

1436

1437

1439 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,

1440 *DAG.getContext());

1441

1442

1443

1444

1447 const GlobalValue *GV = G->getGlobal();

1451 DAG.getTargetGlobalAddress(GV, DL, getPointerTy(DAG.getDataLayout()));

1452 } else if (const ExternalSymbolSDNode *ES =

1454 Callee = DAG.getTargetExternalSymbol(ES->getSymbol(),

1456 }

1457

1458

1459 if (isVarArg) {

1460 CCInfo.AnalyzeCallOperands(Outs, ArgCC_AVR_Vararg);

1461 } else {

1462 analyzeArguments(&CLI, F, &DAG.getDataLayout(), Outs, ArgLocs, CCInfo,

1464 }

1465

1466

1467 unsigned NumBytes = CCInfo.getStackSize();

1468

1469 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);

1470

1472

1473

1474 unsigned AI, AE;

1475 bool HasStackArgs = false;

1476 for (AI = 0, AE = ArgLocs.size(); AI != AE; ++AI) {

1477 CCValAssign &VA = ArgLocs[AI];

1478 EVT RegVT = VA.getLocVT();

1479 SDValue Arg = OutVals[AI];

1480

1481

1482 switch (VA.getLocInfo()) {

1483 default:

1486 break;

1489 break;

1492 break;

1495 break;

1497 Arg = DAG.getNode(ISD::BITCAST, DL, RegVT, Arg);

1498 break;

1499 }

1500

1501

1502

1503 if (VA.isMemLoc()) {

1504 HasStackArgs = true;

1505 break;

1506 }

1507

1508

1509

1510 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));

1511 }

1512

1513

1514

1515

1516

1517

1518

1519 if (HasStackArgs) {

1521 for (; AI != AE; AI++) {

1522 CCValAssign &VA = ArgLocs[AI];

1523 SDValue Arg = OutVals[AI];

1524

1525 assert(VA.isMemLoc());

1526

1527

1528 SDValue PtrOff = DAG.getNode(

1530 DAG.getRegister(AVR::SP, getPointerTy(DAG.getDataLayout())),

1531 DAG.getIntPtrConstant(VA.getLocMemOffset() + 1, DL));

1532

1533 MemOpChains.push_back(

1534 DAG.getStore(Chain, DL, Arg, PtrOff,

1536 }

1537

1538 if (!MemOpChains.empty())

1540 }

1541

1542

1543

1544

1546 for (auto Reg : RegsToPass) {

1547 Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, InGlue);

1548 InGlue = Chain.getValue(1);

1549 }

1550

1551

1552 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

1554 Ops.push_back(Chain);

1555 Ops.push_back(Callee);

1556

1557

1558

1559 for (auto Reg : RegsToPass) {

1560 Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));

1561 }

1562

1563

1564

1565 Ops.push_back(DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8));

1566

1567

1568 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

1569 const uint32_t *Mask =

1570 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);

1571 assert(Mask && "Missing call preserved mask for calling convention");

1572 Ops.push_back(DAG.getRegisterMask(Mask));

1573

1574 if (InGlue.getNode()) {

1575 Ops.push_back(InGlue);

1576 }

1577

1578 Chain = DAG.getNode(AVRISD::CALL, DL, NodeTys, Ops);

1579 InGlue = Chain.getValue(1);

1580

1581

1582 Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, DL);

1583

1584 if (!Ins.empty()) {

1585 InGlue = Chain.getValue(1);

1586 }

1587

1588

1589

1590 return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, DL, DAG,

1591 InVals);

1592}

1593

1594

1595

1596

1597SDValue AVRTargetLowering::LowerCallResult(

1601

1602

1604 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,

1605 *DAG.getContext());

1606

1607

1609 CCInfo.AnalyzeCallResult(Ins, RetCC_AVR_BUILTIN);

1610 } else {

1612 }

1613

1614

1615 for (CCValAssign const &RVLoc : RVLocs) {

1616 Chain = DAG.getCopyFromReg(Chain, dl, RVLoc.getLocReg(), RVLoc.getValVT(),

1617 InGlue)

1618 .getValue(1);

1619 InGlue = Chain.getValue(2);

1620 InVals.push_back(Chain.getValue(0));

1621 }

1622

1623 return Chain;

1624}

1625

1626

1627

1628

1629

1630bool AVRTargetLowering::CanLowerReturn(

1633 const Type *RetTy) const {

1636 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);

1637 return CCInfo.CheckReturn(Outs, RetCC_AVR_BUILTIN);

1638 }

1639

1641 return TotalBytes <= (unsigned)(Subtarget.hasTinyEncoding() ? 4 : 8);

1642}

1643

1646 bool isVarArg,

1650

1652

1653

1654 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,

1655 *DAG.getContext());

1656

1657 MachineFunction &MF = DAG.getMachineFunction();

1658

1659

1661 CCInfo.AnalyzeReturn(Outs, RetCC_AVR_BUILTIN);

1662 } else {

1664 }

1665

1668

1669 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {

1670 CCValAssign &VA = RVLocs[i];

1671 assert(VA.isRegLoc() && "Can only return in registers!");

1672

1673 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Glue);

1674

1675

1676 Glue = Chain.getValue(1);

1677 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));

1678 }

1679

1680

1681

1682 if (MF.getFunction().getAttributes().hasFnAttr(Attribute::Naked)) {

1683 return Chain;

1684 }

1685

1686 const AVRMachineFunctionInfo *AFI = MF.getInfo();

1687

1688 if (!AFI->isInterruptOrSignalHandler()) {

1689

1690

1691

1692

1693 RetOps.push_back(DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8));

1694 }

1695

1696 unsigned RetOpc =

1697 AFI->isInterruptOrSignalHandler() ? AVRISD::RETI_GLUE : AVRISD::RET_GLUE;

1698

1699 RetOps[0] = Chain;

1700

1701 if (Glue.getNode()) {

1702 RetOps.push_back(Glue);

1703 }

1704

1705 return DAG.getNode(RetOpc, dl, MVT::Other, RetOps);

1706}

1707

1708

1709

1710

1711

1714 bool Tiny) const {

1715 unsigned Opc;

1716 const TargetRegisterClass *RC;

1717 bool HasRepeatedOperand = false;

1718 MachineFunction *F = BB->getParent();

1719 MachineRegisterInfo &RI = F->getRegInfo();

1720 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();

1722

1723 switch (MI.getOpcode()) {

1724 default:

1726 case AVR::Lsl8:

1727 Opc = AVR::ADDRdRr;

1728 RC = &AVR::GPR8RegClass;

1729 HasRepeatedOperand = true;

1730 break;

1731 case AVR::Lsl16:

1732 Opc = AVR::LSLWRd;

1733 RC = &AVR::DREGSRegClass;

1734 break;

1735 case AVR::Asr8:

1736 Opc = AVR::ASRRd;

1737 RC = &AVR::GPR8RegClass;

1738 break;

1739 case AVR::Asr16:

1740 Opc = AVR::ASRWRd;

1741 RC = &AVR::DREGSRegClass;

1742 break;

1743 case AVR::Lsr8:

1744 Opc = AVR::LSRRd;

1745 RC = &AVR::GPR8RegClass;

1746 break;

1747 case AVR::Lsr16:

1748 Opc = AVR::LSRWRd;

1749 RC = &AVR::DREGSRegClass;

1750 break;

1751 case AVR::Rol8:

1752 Opc = Tiny ? AVR::ROLBRdR17 : AVR::ROLBRdR1;

1753 RC = &AVR::GPR8RegClass;

1754 break;

1755 case AVR::Rol16:

1756 Opc = AVR::ROLWRd;

1757 RC = &AVR::DREGSRegClass;

1758 break;

1759 case AVR::Ror8:

1760 Opc = AVR::RORBRd;

1761 RC = &AVR::GPR8RegClass;

1762 break;

1763 case AVR::Ror16:

1764 Opc = AVR::RORWRd;

1765 RC = &AVR::DREGSRegClass;

1766 break;

1767 }

1768

1769 const BasicBlock *LLVM_BB = BB->getBasicBlock();

1770

1772 for (I = BB->getIterator(); I != F->end() && &(*I) != BB; ++I)

1773 ;

1774 if (I != F->end())

1775 ++I;

1776

1777

1778 MachineBasicBlock *LoopBB = F->CreateMachineBasicBlock(LLVM_BB);

1779 MachineBasicBlock *CheckBB = F->CreateMachineBasicBlock(LLVM_BB);

1780 MachineBasicBlock *RemBB = F->CreateMachineBasicBlock(LLVM_BB);

1781

1782 F->insert(I, LoopBB);

1783 F->insert(I, CheckBB);

1784 F->insert(I, RemBB);

1785

1786

1787

1789 BB->end());

1790 RemBB->transferSuccessorsAndUpdatePHIs(BB);

1791

1792

1793 BB->addSuccessor(CheckBB);

1794 LoopBB->addSuccessor(CheckBB);

1795 CheckBB->addSuccessor(LoopBB);

1796 CheckBB->addSuccessor(RemBB);

1797

1798 Register ShiftAmtReg = RI.createVirtualRegister(&AVR::GPR8RegClass);

1799 Register ShiftAmtReg2 = RI.createVirtualRegister(&AVR::GPR8RegClass);

1800 Register ShiftReg = RI.createVirtualRegister(RC);

1801 Register ShiftReg2 = RI.createVirtualRegister(RC);

1802 Register ShiftAmtSrcReg = MI.getOperand(2).getReg();

1803 Register SrcReg = MI.getOperand(1).getReg();

1804 Register DstReg = MI.getOperand(0).getReg();

1805

1806

1807

1809

1810

1811

1812 auto ShiftMI = BuildMI(LoopBB, dl, TII.get(Opc), ShiftReg2).addReg(ShiftReg);

1813 if (HasRepeatedOperand)

1814 ShiftMI.addReg(ShiftReg);

1815

1816

1817

1818

1819

1820

1821

1822 BuildMI(CheckBB, dl, TII.get(AVR::PHI), ShiftReg)

1827 BuildMI(CheckBB, dl, TII.get(AVR::PHI), ShiftAmtReg)

1828 .addReg(ShiftAmtSrcReg)

1830 .addReg(ShiftAmtReg2)

1832 BuildMI(CheckBB, dl, TII.get(AVR::PHI), DstReg)

1837

1838 BuildMI(CheckBB, dl, TII.get(AVR::DECRd), ShiftAmtReg2).addReg(ShiftAmtReg);

1840

1841 MI.eraseFromParent();

1842 return RemBB;

1843}

1844

1845

1846

1847

1848

1849

1850

1851

1852

1853

1854

1855

1862 const DebugLoc &dl = MI.getDebugLoc();

1863

1865 const bool ArithmeticShift = Opc == ISD::SRA;

1866

1867

1868 Register ZeroReg = MRI.createVirtualRegister(&AVR::GPR8RegClass);

1869 BuildMI(*BB, MI, dl, TII.get(AVR::COPY), ZeroReg)

1871

1872

1873

1874

1875

1876 if (ShiftLeft && (ShiftAmt % 8) >= 6) {

1877

1878

1879

1880

1881 size_t ShiftRegsOffset = ShiftAmt / 8;

1882 size_t ShiftRegsSize = Regs.size() - ShiftRegsOffset;

1884 Regs.slice(ShiftRegsOffset, ShiftRegsSize);

1885

1886

1887

1889

1890

1891

1892 Register LowByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);

1894

1895

1896 if (ShiftAmt % 8 == 6) {

1898 Register NewLowByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);

1899 BuildMI(*BB, MI, dl, TII.get(AVR::RORRd), NewLowByte).addReg(LowByte);

1900 LowByte = NewLowByte;

1901 }

1902

1903

1904 for (size_t I = 0; I < Regs.size(); I++) {

1905 int ShiftRegsIdx = I + 1;

1906 if (ShiftRegsIdx < (int)ShiftRegs.size()) {

1907 Regs[I] = ShiftRegs[ShiftRegsIdx];

1908 } else if (ShiftRegsIdx == (int)ShiftRegs.size()) {

1909 Regs[I] = std::pair(LowByte, 0);

1910 } else {

1911 Regs[I] = std::pair(ZeroReg, 0);

1912 }

1913 }

1914

1915 return;

1916 }

1917

1918

1919 if (!ShiftLeft && (ShiftAmt % 8) >= 6) {

1920

1921

1922 size_t ShiftRegsSize = Regs.size() - (ShiftAmt / 8);

1924 Regs.slice(0, ShiftRegsSize);

1925

1926

1928

1929

1930

1931

1932

1933 Register HighByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);

1935 if (ArithmeticShift) {

1936

1937 BuildMI(*BB, MI, dl, TII.get(AVR::SBCRdRr), HighByte)

1940 ExtByte = HighByte;

1941

1942

1943 } else {

1944

1945 ExtByte = ZeroReg;

1946

1947 BuildMI(*BB, MI, dl, TII.get(AVR::ADCRdRr), HighByte)

1950 }

1951

1952

1953 if (ShiftAmt % 8 == 6) {

1955

1956 Register NewExt = MRI.createVirtualRegister(&AVR::GPR8RegClass);

1957 BuildMI(*BB, MI, dl, TII.get(AVR::ADCRdRr), NewExt)

1960 HighByte = NewExt;

1961 }

1962

1963

1964 for (int I = Regs.size() - 1; I >= 0; I--) {

1965 int ShiftRegsIdx = I - (Regs.size() - ShiftRegs.size()) - 1;

1966 if (ShiftRegsIdx >= 0) {

1967 Regs[I] = ShiftRegs[ShiftRegsIdx];

1968 } else if (ShiftRegsIdx == -1) {

1969 Regs[I] = std::pair(HighByte, 0);

1970 } else {

1971 Regs[I] = std::pair(ExtByte, 0);

1972 }

1973 }

1974

1975 return;

1976 }

1977

1978

1979

1980 while (ShiftLeft && ShiftAmt >= 8) {

1981

1982 for (size_t I = 0; I < Regs.size() - 1; I++) {

1983 Regs[I] = Regs[I + 1];

1984 }

1985

1986

1987 Regs[Regs.size() - 1] = std::pair(ZeroReg, 0);

1988

1989

1990 Regs = Regs.drop_back(1);

1991

1992 ShiftAmt -= 8;

1993 }

1994

1995

1997 if (!ShiftLeft && ShiftAmt >= 8) {

1998 if (ArithmeticShift) {

1999

2000 ShrExtendReg = MRI.createVirtualRegister(&AVR::GPR8RegClass);

2001 Register Tmp = MRI.createVirtualRegister(&AVR::GPR8RegClass);

2002 BuildMI(*BB, MI, dl, TII.get(AVR::ADDRdRr), Tmp)

2003 .addReg(Regs[0].first, 0, Regs[0].second)

2004 .addReg(Regs[0].first, 0, Regs[0].second);

2005 BuildMI(*BB, MI, dl, TII.get(AVR::SBCRdRr), ShrExtendReg)

2008 } else {

2009 ShrExtendReg = ZeroReg;

2010 }

2011 for (; ShiftAmt >= 8; ShiftAmt -= 8) {

2012

2013 for (size_t I = Regs.size() - 1; I != 0; I--) {

2014 Regs[I] = Regs[I - 1];

2015 }

2016

2017

2018 Regs[0] = std::pair(ShrExtendReg, 0);

2019

2020

2021 Regs = Regs.drop_front(1);

2022 }

2023 }

2024

2025

2026 assert((ShiftAmt < 8) && "Unexpect shift amount");

2027

2028

2029

2030

2031

2032

2033

2034

2035

2036

2037

2038

2039

2040

2041

2042

2043

2044

2045

2046 if (!ArithmeticShift && ShiftAmt >= 4) {

2048 for (size_t I = 0; I < Regs.size(); I++) {

2049 size_t Idx = ShiftLeft ? I : Regs.size() - I - 1;

2050 Register SwapReg = MRI.createVirtualRegister(&AVR::LD8RegClass);

2051 BuildMI(*BB, MI, dl, TII.get(AVR::SWAPRd), SwapReg)

2052 .addReg(Regs[Idx].first, 0, Regs[Idx].second);

2053 if (I != 0) {

2054 Register R = MRI.createVirtualRegister(&AVR::GPR8RegClass);

2055 BuildMI(*BB, MI, dl, TII.get(AVR::EORRdRr), R)

2058 Prev = R;

2059 }

2060 Register AndReg = MRI.createVirtualRegister(&AVR::LD8RegClass);

2061 BuildMI(*BB, MI, dl, TII.get(AVR::ANDIRdK), AndReg)

2063 .addImm(ShiftLeft ? 0xf0 : 0x0f);

2064 if (I != 0) {

2065 Register R = MRI.createVirtualRegister(&AVR::GPR8RegClass);

2066 BuildMI(*BB, MI, dl, TII.get(AVR::EORRdRr), R)

2069 size_t PrevIdx = ShiftLeft ? Idx - 1 : Idx + 1;

2070 Regs[PrevIdx] = std::pair(R, 0);

2071 }

2072 Prev = AndReg;

2073 Regs[Idx] = std::pair(AndReg, 0);

2074 }

2075 ShiftAmt -= 4;

2076 }

2077

2078

2079

2080 while (ShiftLeft && ShiftAmt) {

2081

2082 for (ssize_t I = Regs.size() - 1; I >= 0; I--) {

2083 Register Out = MRI.createVirtualRegister(&AVR::GPR8RegClass);

2085 Register InSubreg = Regs[I].second;

2086 if (I == (ssize_t)Regs.size() - 1) {

2087 BuildMI(*BB, MI, dl, TII.get(AVR::ADDRdRr), Out)

2088 .addReg(In, 0, InSubreg)

2089 .addReg(In, 0, InSubreg);

2090 } else {

2091 BuildMI(*BB, MI, dl, TII.get(AVR::ADCRdRr), Out)

2092 .addReg(In, 0, InSubreg)

2093 .addReg(In, 0, InSubreg);

2094 }

2095 Regs[I] = std::pair(Out, 0);

2096 }

2097 ShiftAmt--;

2098 }

2099 while (!ShiftLeft && ShiftAmt) {

2100

2101 for (size_t I = 0; I < Regs.size(); I++) {

2102 Register Out = MRI.createVirtualRegister(&AVR::GPR8RegClass);

2104 Register InSubreg = Regs[I].second;

2105 if (I == 0) {

2106 unsigned Opc = ArithmeticShift ? AVR::ASRRd : AVR::LSRRd;

2108 } else {

2109 BuildMI(*BB, MI, dl, TII.get(AVR::RORRd), Out).addReg(In, 0, InSubreg);

2110 }

2111 Regs[I] = std::pair(Out, 0);

2112 }

2113 ShiftAmt--;

2114 }

2115

2116 if (ShiftAmt != 0) {

2117 llvm_unreachable("don't know how to shift!");

2118 }

2119}

2120

2121

2122MachineBasicBlock *

2123AVRTargetLowering::insertWideShift(MachineInstr &MI,

2124 MachineBasicBlock *BB) const {

2126 const DebugLoc &dl = MI.getDebugLoc();

2127

2128

2129

2130 int64_t ShiftAmt = MI.getOperand(4).getImm();

2132 switch (MI.getOpcode()) {

2133 case AVR::Lsl32:

2135 break;

2136 case AVR::Lsr32:

2138 break;

2139 case AVR::Asr32:

2141 break;

2142 }

2143

2144

2145 std::array<std::pair<Register, int>, 4> Registers = {

2146 std::pair(MI.getOperand(3).getReg(), AVR::sub_hi),

2147 std::pair(MI.getOperand(3).getReg(), AVR::sub_lo),

2148 std::pair(MI.getOperand(2).getReg(), AVR::sub_hi),

2149 std::pair(MI.getOperand(2).getReg(), AVR::sub_lo),

2150 };

2151

2152

2154

2155

2156

2157

2158

2159

2160

2161

2162

2163

2164

2165

2167 (Opc != ISD::SRA || (ShiftAmt < 16 || ShiftAmt >= 22))) {

2168

2169 BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(0).getReg())

2171 .addImm(AVR::sub_lo)

2173 .addImm(AVR::sub_hi);

2174 BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(1).getReg())

2176 .addImm(AVR::sub_lo)

2178 .addImm(AVR::sub_hi);

2179 } else {

2180

2181 BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(1).getReg())

2183 .addImm(AVR::sub_hi)

2185 .addImm(AVR::sub_lo);

2186 BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(0).getReg())

2188 .addImm(AVR::sub_hi)

2190 .addImm(AVR::sub_lo);

2191 }

2192

2193

2194 MI.eraseFromParent();

2195 return BB;

2196}

2197

2199 if (I->getOpcode() == AVR::COPY) {

2200 Register SrcReg = I->getOperand(1).getReg();

2201 return (SrcReg == AVR::R0 || SrcReg == AVR::R1);

2202 }

2203

2204 return false;

2205}

2206

2207

2208

2209

2210MachineBasicBlock *AVRTargetLowering::insertMul(MachineInstr &MI,

2211 MachineBasicBlock *BB) const {

2214 ++I;

2216 ++I;

2218 ++I;

2219 BuildMI(*BB, I, MI.getDebugLoc(), TII.get(AVR::EORRdRr), AVR::R1)

2222 return BB;

2223}

2224

2225

2229 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();

2231 BuildMI(*BB, I, MI.getDebugLoc(), TII.get(AVR::COPY))

2232 .add(MI.getOperand(0))

2234 MI.eraseFromParent();

2235 return BB;

2236}

2237

2238

2239

2242 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

2243 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();

2246

2247

2248

2249

2250

2251

2252

2253

2254

2255

2256 const TargetRegisterClass *RC =

2257 (Width == 8) ? &AVR::GPR8RegClass : &AVR::DREGSRegClass;

2258 unsigned LoadOpcode = (Width == 8) ? AVR::LDRdPtr : AVR::LDWRdPtr;

2259 unsigned StoreOpcode = (Width == 8) ? AVR::STPtrRr : AVR::STWPtrRr;

2260

2261

2265

2266

2267 BuildMI(*BB, I, dl, TII.get(LoadOpcode), MI.getOperand(0).getReg())

2268 .add(MI.getOperand(1));

2269

2270

2272 BuildMI(*BB, I, dl, TII.get(Opcode), Result)

2273 .addReg(MI.getOperand(0).getReg())

2274 .add(MI.getOperand(2));

2275

2276

2277 BuildMI(*BB, I, dl, TII.get(StoreOpcode))

2278 .add(MI.getOperand(1))

2280

2281

2282 BuildMI(*BB, I, dl, TII.get(AVR::OUTARr))

2285

2286

2287 MI.eraseFromParent();

2288 return BB;

2289}

2290

2294 int Opc = MI.getOpcode();

2296

2297

2298

2299 switch (Opc) {

2300 case AVR::Lsl8:

2301 case AVR::Lsl16:

2302 case AVR::Lsr8:

2303 case AVR::Lsr16:

2304 case AVR::Rol8:

2305 case AVR::Rol16:

2306 case AVR::Ror8:

2307 case AVR::Ror16:

2308 case AVR::Asr8:

2309 case AVR::Asr16:

2310 return insertShift(MI, MBB, STI.hasTinyEncoding());

2311 case AVR::Lsl32:

2312 case AVR::Lsr32:

2313 case AVR::Asr32:

2314 return insertWideShift(MI, MBB);

2315 case AVR::MULRdRr:

2316 case AVR::MULSRdRr:

2317 return insertMul(MI, MBB);

2318 case AVR::CopyZero:

2319 return insertCopyZero(MI, MBB);

2320 case AVR::AtomicLoadAdd8:

2321 return insertAtomicArithmeticOp(MI, MBB, AVR::ADDRdRr, 8);

2322 case AVR::AtomicLoadAdd16:

2323 return insertAtomicArithmeticOp(MI, MBB, AVR::ADDWRdRr, 16);

2324 case AVR::AtomicLoadSub8:

2325 return insertAtomicArithmeticOp(MI, MBB, AVR::SUBRdRr, 8);

2326 case AVR::AtomicLoadSub16:

2327 return insertAtomicArithmeticOp(MI, MBB, AVR::SUBWRdRr, 16);

2328 case AVR::AtomicLoadAnd8:

2329 return insertAtomicArithmeticOp(MI, MBB, AVR::ANDRdRr, 8);

2330 case AVR::AtomicLoadAnd16:

2331 return insertAtomicArithmeticOp(MI, MBB, AVR::ANDWRdRr, 16);

2332 case AVR::AtomicLoadOr8:

2333 return insertAtomicArithmeticOp(MI, MBB, AVR::ORRdRr, 8);

2334 case AVR::AtomicLoadOr16:

2335 return insertAtomicArithmeticOp(MI, MBB, AVR::ORWRdRr, 16);

2336 case AVR::AtomicLoadXor8:

2337 return insertAtomicArithmeticOp(MI, MBB, AVR::EORRdRr, 8);

2338 case AVR::AtomicLoadXor16:

2339 return insertAtomicArithmeticOp(MI, MBB, AVR::EORWRdRr, 16);

2340 }

2341

2342 assert((Opc == AVR::Select16 || Opc == AVR::Select8) &&

2343 "Unexpected instr type to insert");

2344

2346 ->getParent()

2347 ->getSubtarget()

2348 .getInstrInfo();

2350

2351

2352

2353

2354

2355

2356

2358 const BasicBlock *LLVM_BB = MBB->getBasicBlock();

2360

2361

2362

2363

2364 if (FallThrough != nullptr) {

2366 }

2367

2370

2372 for (I = MF->begin(); I != MF->end() && &(*I) != MBB; ++I)

2373 ;

2374 if (I != MF->end())

2375 ++I;

2377 MF->insert(I, falseMBB);

2378

2379

2380 unsigned CallFrameSize = TII.getCallFrameSizeAt(MI);

2383

2384

2385

2386

2390

2394 MBB->addSuccessor(falseMBB);

2395 MBB->addSuccessor(trueMBB);

2396

2397

2400

2401

2402 BuildMI(*trueMBB, trueMBB->begin(), dl, TII.get(AVR::PHI),

2403 MI.getOperand(0).getReg())

2404 .addReg(MI.getOperand(1).getReg())

2406 .addReg(MI.getOperand(2).getReg())

2408

2409 MI.eraseFromParent();

2410 return trueMBB;

2411}

2412

2413

2414

2415

2416

2419 if (Constraint.size() == 1) {

2420

2421 switch (Constraint[0]) {

2422 default:

2423 break;

2424 case 'a':

2425 case 'b':

2426 case 'd':

2427 case 'l':

2428 case 'e':

2429 case 'q':

2430 case 'r':

2431 case 'w':

2433 case 't':

2434 case 'x':

2435 case 'X':

2436 case 'y':

2437 case 'Y':

2438 case 'z':

2439 case 'Z':

2441 case 'Q':

2443 case 'G':

2444 case 'I':

2445 case 'J':

2446 case 'K':

2447 case 'L':

2448 case 'M':

2449 case 'N':

2450 case 'O':

2451 case 'P':

2452 case 'R':

2454 }

2455 }

2456

2458}

2459

2462

2463

2464 switch (ConstraintCode[0]) {

2465 case 'Q':

2467 }

2469}

2470

2475 Value *CallOperandVal = info.CallOperandVal;

2476

2477

2478

2479

2480 if (!CallOperandVal) {

2482 }

2483

2484

2485 switch (*constraint) {

2486 default:

2488 break;

2489 case 'd':

2490 case 'r':

2491 case 'l':

2493 break;

2494 case 'a':

2495 case 'b':

2496 case 'e':

2497 case 'q':

2498 case 't':

2499 case 'w':

2500 case 'x':

2501 case 'X':

2502 case 'y':

2503 case 'Y':

2504 case 'z':

2505 case 'Z':

2507 break;

2508 case 'G':

2510 if (C->isZero()) {

2512 }

2513 }

2514 break;

2515 case 'I':

2519 }

2520 }

2521 break;

2522 case 'J':

2524 if ((C->getSExtValue() >= -63) && (C->getSExtValue() <= 0)) {

2526 }

2527 }

2528 break;

2529 case 'K':

2531 if (C->getZExtValue() == 2) {

2533 }

2534 }

2535 break;

2536 case 'L':

2538 if (C->getZExtValue() == 0) {

2540 }

2541 }

2542 break;

2543 case 'M':

2547 }

2548 }

2549 break;

2550 case 'N':

2552 if (C->getSExtValue() == -1) {

2554 }

2555 }

2556 break;

2557 case 'O':

2559 if ((C->getZExtValue() == 8) || (C->getZExtValue() == 16) ||

2560 (C->getZExtValue() == 24)) {

2562 }

2563 }

2564 break;

2565 case 'P':

2567 if (C->getZExtValue() == 1) {

2569 }

2570 }

2571 break;

2572 case 'R':

2574 if ((C->getSExtValue() >= -6) && (C->getSExtValue() <= 5)) {

2576 }

2577 }

2578 break;

2579 case 'Q':

2581 break;

2582 }

2583

2584 return weight;

2585}

2586

2587std::pair<unsigned, const TargetRegisterClass *>

2590 MVT VT) const {

2591 if (Constraint.size() == 1) {

2592 switch (Constraint[0]) {

2593 case 'a':

2594 if (VT == MVT::i8)

2595 return std::make_pair(0U, &AVR::LD8loRegClass);

2596 else if (VT == MVT::i16)

2597 return std::make_pair(0U, &AVR::DREGSLD8loRegClass);

2598 break;

2599 case 'b':

2600 if (VT == MVT::i8 || VT == MVT::i16)

2601 return std::make_pair(0U, &AVR::PTRDISPREGSRegClass);

2602 break;

2603 case 'd':

2604 if (VT == MVT::i8)

2605 return std::make_pair(0U, &AVR::LD8RegClass);

2606 else if (VT == MVT::i16)

2607 return std::make_pair(0U, &AVR::DLDREGSRegClass);

2608 break;

2609 case 'l':

2610 if (VT == MVT::i8)

2611 return std::make_pair(0U, &AVR::GPR8loRegClass);

2612 else if (VT == MVT::i16)

2613 return std::make_pair(0U, &AVR::DREGSloRegClass);

2614 break;

2615 case 'e':

2616 if (VT == MVT::i8 || VT == MVT::i16)

2617 return std::make_pair(0U, &AVR::PTRREGSRegClass);

2618 break;

2619 case 'q':

2620 return std::make_pair(0U, &AVR::GPRSPRegClass);

2621 case 'r':

2622 if (VT == MVT::i8)

2623 return std::make_pair(0U, &AVR::GPR8RegClass);

2624 else if (VT == MVT::i16)

2625 return std::make_pair(0U, &AVR::DREGSRegClass);

2626 break;

2627 case 't':

2628 if (VT == MVT::i8)

2629 return std::make_pair(unsigned(Subtarget.getTmpRegister()),

2630 &AVR::GPR8RegClass);

2631 break;

2632 case 'w':

2633 if (VT == MVT::i8 || VT == MVT::i16)

2634 return std::make_pair(0U, &AVR::IWREGSRegClass);

2635 break;

2636 case 'x':

2637 case 'X':

2638 if (VT == MVT::i8 || VT == MVT::i16)

2639 return std::make_pair(unsigned(AVR::R27R26), &AVR::PTRREGSRegClass);

2640 break;

2641 case 'y':

2642 case 'Y':

2643 if (VT == MVT::i8 || VT == MVT::i16)

2644 return std::make_pair(unsigned(AVR::R29R28), &AVR::PTRREGSRegClass);

2645 break;

2646 case 'z':

2647 case 'Z':

2648 if (VT == MVT::i8 || VT == MVT::i16)

2649 return std::make_pair(unsigned(AVR::R31R30), &AVR::PTRREGSRegClass);

2650 break;

2651 default:

2652 break;

2653 }

2654 }

2655

2657 Subtarget.getRegisterInfo(), Constraint, VT);

2658}

2659

2662 std::vector &Ops,

2666 EVT Ty = Op.getValueType();

2667

2668

2669 if (Constraint.size() != 1) {

2670 return;

2671 }

2672

2673 char ConstraintLetter = Constraint[0];

2674 switch (ConstraintLetter) {

2675 default:

2676 break;

2677

2678 case 'I':

2679 case 'J':

2680 case 'K':

2681 case 'L':

2682 case 'M':

2683 case 'N':

2684 case 'O':

2685 case 'P':

2686 case 'R': {

2688 if (C) {

2689 return;

2690 }

2691

2692 int64_t CVal64 = C->getSExtValue();

2693 uint64_t CUVal64 = C->getZExtValue();

2694 switch (ConstraintLetter) {

2695 case 'I':

2697 return;

2699 break;

2700 case 'J':

2701 if (CVal64 < -63 || CVal64 > 0)

2702 return;

2704 break;

2705 case 'K':

2706 if (CUVal64 != 2)

2707 return;

2709 break;

2710 case 'L':

2711 if (CUVal64 != 0)

2712 return;

2714 break;

2715 case 'M':

2717 return;

2718

2719

2720

2721 if (Ty.getSimpleVT() == MVT::i8) {

2722 Ty = MVT::i16;

2723 }

2725 break;

2726 case 'N':

2727 if (CVal64 != -1)

2728 return;

2730 break;

2731 case 'O':

2732 if (CUVal64 != 8 && CUVal64 != 16 && CUVal64 != 24)

2733 return;

2735 break;

2736 case 'P':

2737 if (CUVal64 != 1)

2738 return;

2740 break;

2741 case 'R':

2742 if (CVal64 < -6 || CVal64 > 5)

2743 return;

2745 break;

2746 }

2747

2748 break;

2749 }

2750 case 'G':

2752 if (!FC || !FC->isZero())

2753 return;

2754

2756 break;

2757 }

2758

2759 if (Result.getNode()) {

2760 Ops.push_back(Result);

2761 return;

2762 }

2763

2765}

2766

2770

2773 .Case("r0", AVR::R0)

2774 .Case("r1", AVR::R1)

2776 } else {

2778 .Case("r0", AVR::R1R0)

2779 .Case("sp", AVR::SP)

2781 }

2782

2783 if (Reg)

2784 return Reg;

2785

2788}

2789

2790}

unsigned const MachineRegisterInfo * MRI

assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")

const TargetInstrInfo & TII

MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL

Function Alias Analysis Results

static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")

const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]

Register const TargetRegisterInfo * TRI

Promote Memory to Register

SI Pre allocate WWM Registers

This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...

Utilities related to the AVR instruction set.

A specific AVR target MCU.

Register getZeroRegister() const

const AVRInstrInfo * getInstrInfo() const override

void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override

Replace a node with an illegal result type with a new node built out of custom code.

Definition AVRISelLowering.cpp:955

SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override

This callback is invoked for operations that are unsupported by the target, which are registered to u...

Definition AVRISelLowering.cpp:921

bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override

Returns true by value, base pointer and offset pointer and addressing mode by reference if the node's...

Definition AVRISelLowering.cpp:1014

std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override

Given a physical register constraint (e.g.

Definition AVRISelLowering.cpp:2588

MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override

This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...

Definition AVRISelLowering.cpp:2292

ConstraintType getConstraintType(StringRef Constraint) const override

Given a constraint, return the type of constraint it is for this target.

Definition AVRISelLowering.cpp:2418

const AVRSubtarget & Subtarget

InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const override

Definition AVRISelLowering.cpp:2461

bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override

Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...

Definition AVRISelLowering.cpp:984

ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override

Examine constraint string and operand type and determine a weight value.

Definition AVRISelLowering.cpp:2472

bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override

Return true if folding a constant offset with the given GlobalAddress is legal.

Definition AVRISelLowering.cpp:1138

Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override

Return the register ID of the name passed in.

Definition AVRISelLowering.cpp:2767

AVRTargetLowering(const AVRTargetMachine &TM, const AVRSubtarget &STI)

Definition AVRISelLowering.cpp:35

void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override

Lower the specified operand into the Ops vector.

Definition AVRISelLowering.cpp:2660

EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override

Return the ValueType of the result of SETCC operations.

Definition AVRISelLowering.cpp:205

bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override

Returns true by value, base pointer and offset pointer and addressing mode by reference if this node ...

Definition AVRISelLowering.cpp:1070

A generic AVR implementation.

ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...

size_t size() const

size - Get the array size.

LLVM Basic Block Representation.

CCState - This class holds information needed while lowering arguments and return values.

MCRegister AllocateReg(MCPhysReg Reg)

AllocateReg - Attempt to allocate one register.

LLVMContext & getContext() const

int64_t AllocateStack(unsigned Size, Align Alignment)

AllocateStack - Allocate a chunk of stack space with the specified size and alignment.

void addLoc(const CCValAssign &V)

static CCValAssign getReg(unsigned ValNo, MVT ValVT, MCRegister Reg, MVT LocVT, LocInfo HTP, bool IsCustom=false)

static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)

ConstantFP - Floating Point Values [float, double].

This is the shared class of boolean and integer constants.

uint64_t getNumOperands() const

A parsed version of the target data layout string in and methods for querying it.

LLVM_ABI Align getABITypeAlign(Type *Ty) const

Returns the minimum ABI-required alignment for the specified type.

LLVM_ABI TypeSize getTypeAllocSize(Type *Ty) const

Returns the offset in bytes between successive objects of the specified type, including alignment pad...

static constexpr LLT scalar(unsigned SizeInBits)

Get a low-level scalar or aggregate "bag of bits".

This is an important class for using LLVM in a threaded context.

This class is used to represent ISD::LOAD nodes.

static auto integer_valuetypes()

TypeSize getStoreSize() const

Return the number of bytes overwritten by a store of the specified value type.

LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)

Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...

void setCallFrameSize(unsigned N)

Set the call frame size on entry to this basic block.

LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())

Add Succ as a successor of this MachineBasicBlock.

const MachineFunction * getParent() const

Return the MachineFunction containing this basic block.

void splice(iterator Where, MachineBasicBlock *Other, iterator From)

Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...

MachineInstrBundleIterator< MachineInstr > iterator

const TargetSubtargetInfo & getSubtarget() const

getSubtarget - Return the subtarget for which this machine code is being compiled.

MachineRegisterInfo & getRegInfo()

getRegInfo - Return information about the registers currently in use.

BasicBlockListType::iterator iterator

MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)

CreateMachineInstr - Allocate a new MachineInstr.

void insert(iterator MBBI, MachineBasicBlock *MBB)

const MachineInstrBuilder & addImm(int64_t Val) const

Add a new immediate operand.

const MachineInstrBuilder & add(const MachineOperand &MO) const

const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const

Add a new virtual register operand.

const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const

Representation of each machine instruction.

MachineRegisterInfo - Keep track of information for virtual and physical registers,...

MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...

MutableArrayRef< T > slice(size_t N, size_t M) const

slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.

Wrapper class representing virtual and physical registers.

Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...

Represents one node in the SelectionDAG.

unsigned getNumValues() const

Return the number of values defined/returned by this operator.

Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.

SDValue getValue(unsigned R) const

This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...

LLVM_ABI SDVTList getVTList(EVT VT)

Return an SDVTList that represents the list of values specified.

LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)

Create a ConstantSDNode wrapping a constant value.

LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)

LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)

Gets or creates the specified node.

SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)

This class consists of common code factored out of the SmallVector class to reduce code duplication b...

This class is used to represent ISD::STORE nodes.

StringRef - Represent a constant reference to a string, i.e.

constexpr size_t size() const

size - Get the string size.

A switch()-like statement whose cases are string literals.

StringSwitch & Case(StringLiteral S, T Value)

static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)

This static method is the primary way to create a literal StructType.

TargetInstrInfo - Interface to description of machine instruction set.

void setBooleanVectorContents(BooleanContent Ty)

Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...

void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)

Indicate that the specified operation does not work with the specified type and indicate what to do a...

CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const

Get the CallingConv that should be used for the specified libcall.

void setIndexedLoadAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)

Indicate that the specified indexed load does or does not work with the specified type and indicate w...

void setMinFunctionAlignment(Align Alignment)

Set the target's minimum function alignment.

void setBooleanContents(BooleanContent Ty)

Specify how the target extends the result of integer and floating point boolean values from i1 to a wider type.

void computeRegisterProperties(const TargetRegisterInfo *TRI)

Once all of the register classes are added, this allows us to compute derived properties we expose.

void addRegisterClass(MVT VT, const TargetRegisterClass *RC)

Add the specified register class as an available regclass for the specified value type.

void setIndexedStoreAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)

Indicate that the specified indexed store does or does not work with the specified type and indicate what to do about it.

void setSupportsUnalignedAtomics(bool UnalignedSupported)

Sets whether unaligned atomic operations are supported.

virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const

Return the pointer type for the given address space; defaults to the pointer type from the data layout.

void setMinimumJumpTableEntries(unsigned Val)

Indicate the minimum number of blocks to generate jump tables.

void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)

Indicate that the specified truncating store does not work with the specified type and indicate what to do about it.

@ ZeroOrOneBooleanContent

void setStackPointerRegisterToSaveRestore(Register R)

If set to a physical register, this specifies the register that llvm.stacksave/llvm.stackrestore should save and restore.

void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)

Indicate that the specified load with extension does not work with the specified type and indicate what to do about it.

const char * getLibcallName(RTLIB::Libcall Call) const

Get the libcall routine name for the specified libcall.

std::vector< ArgListEntry > ArgListTy

void setSchedulingPreference(Sched::Preference Pref)

Specify the target scheduling preference.
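
Taken together, these hooks are normally invoked from a TargetLowering subclass constructor. A hedged sketch of that pattern follows; the actions, types and the Subtarget member are illustrative of the idiom, not a verbatim copy of the AVR constructor.

// Inside the constructor of a TargetLowering subclass:
addRegisterClass(MVT::i8, &AVR::GPR8RegClass);              // make i8 a legal type
setOperationAction(ISD::SDIV, MVT::i8, Expand);             // no native divide
setLoadExtAction(ISD::EXTLOAD, MVT::i16, MVT::i8, Expand);  // no extending i8->i16 load
setTruncStoreAction(MVT::i16, MVT::i8, Expand);             // no truncating i16->i8 store
setBooleanContents(ZeroOrOneBooleanContent);
setMinFunctionAlignment(Align(2));
computeRegisterProperties(Subtarget.getRegisterInfo());     // after all register classes are added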

virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const

virtual ConstraintType getConstraintType(StringRef Constraint) const

Given a constraint, return the type of constraint it is for this target.

std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const

This function lowers an abstract call to a function into an actual call.
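
A hedged sketch of issuing a library call through this interface, assuming DAG, dl, Chain, a callee SDValue Callee, a return Type *RetTy and a prepared ArgListTy Args are in scope; the calling convention shown is just one possibility.

TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl)
    .setChain(Chain)
    .setLibCallee(CallingConv::AVR_BUILTIN, RetTy, Callee, std::move(Args));
std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
SDValue Result   = CallResult.first;  // value returned by the call
SDValue OutChain = CallResult.second; // updated chain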

virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const

Examine constraint string and operand type and determine a weight value.

virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const

Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.

TargetLowering(const TargetLowering &)=delete

virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const

Lower the specified operand into the Ops vector.
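
As a sketch of how a constraint-classification hook typically looks, a hypothetical helper; the letters and their classification are illustrative, not the AVR implementation verbatim.

TargetLowering::ConstraintType classifyConstraint(char Letter) {
  switch (Letter) {
  case 'r':                                   // general register
    return TargetLowering::C_RegisterClass;
  case 'I':                                   // hypothetical immediate range
    return TargetLowering::C_Immediate;
  default:
    return TargetLowering::C_Other;
  }
}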

TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.

virtual const TargetInstrInfo * getInstrInfo() const

Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.

The instances of the Type class are immutable: once they are created, they are never changed.

LLVM Value Representation.

#define llvm_unreachable(msg)

Marks that the current location is not supposed to be reachable.

constexpr char Args[]

Key for Kernel::Metadata::mArgs.

CondCodes

AVR specific condition codes.

@ COND_SH

Unsigned same or higher.

@ COND_GE

Greater than or equal.

bool isProgramMemoryAccess(MemSDNode const *N)

constexpr std::underlying_type_t< E > Mask()

Get a bitmask with 1s in all places up to the high-order bit of E's largest value.

unsigned ID

LLVM IR allows the use of arbitrary numbers as calling convention identifiers.

@ AVR_BUILTIN

Used for special AVR rtlib functions which have an "optimized" convention to preserve registers.

@ C

The default llvm calling convention, compatible with C.

NodeType

ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.

@ SETCC

SetCC operator - This evaluates to a true value iff the condition is true.

@ SMUL_LOHI

SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results, each of type iN.

@ BSWAP

Byte Swap and Counting operators.

@ ADDC

Carry-setting nodes for multiple precision addition and subtraction.

@ ADD

Simple integer binary arithmetic operators.

@ ANY_EXTEND

ANY_EXTEND - Used for integer types. The high bits are undefined.

@ SDIVREM

SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.

@ BUILD_PAIR

BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.

@ SIGN_EXTEND

Conversion operators.

@ SELECT

Select(COND, TRUEVAL, FALSEVAL).

@ EXTRACT_ELEMENT

EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.

@ BasicBlock

Various leaf nodes.

@ MULHU

MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of type iN containing the high bits of the result.

@ SHL

Shift and rotation operations.

@ ZERO_EXTEND

ZERO_EXTEND - Used for integer types, zeroing the new bits.

@ SELECT_CC

Select with condition operator - This selects between a true value and a false value (ops #2 and #3) based on the boolean result of comparing the first two operands (ops #0 and #1), using the condition code in op #4.

@ SIGN_EXTEND_INREG

SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in a larger integer register.

@ AND

Bitwise operators - logical and, logical or, logical xor.

@ ADDE

Carry-using nodes for multiple precision addition and subtraction.

@ TokenFactor

TokenFactor - This node takes multiple tokens as input and produces a single token result.

@ TRUNCATE

TRUNCATE - Completely drop the high bits.

@ SHL_PARTS

SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.

@ AssertSext

AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero- or sign-extended from a narrower type.

MemIndexedMode

MemIndexedMode enum - This enum defines the load / store indexed addressing modes.

CondCode

ISD::CondCode enum - These are ordered carefully to make the bitfields below work out, when considering SETFALSE (something that never exists dynamically) as 0.

@ Undef

Value of the register doesn't matter.

This is an optimization pass for GlobalISel generic memory operations.


static void analyzeReturnValues(const SmallVectorImpl< ArgT > &Args, CCState &CCInfo, bool Tiny)

Analyze the incoming and outgoing values of a function return.

Definition AVRISelLowering.cpp:1269

MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)

Builder interface. Specify how to create the initial instruction itself.
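
A minimal sketch of the builder pattern, assuming a MachineBasicBlock *MBB, an insertion iterator I, a DebugLoc dl, a const TargetInstrInfo &TII and virtual registers DstReg/DstReg2/SrcReg1/SrcReg2/SrcReg3 are in scope; the opcodes are real AVR opcodes, the register names are ours.

// DstReg = ADD SrcReg1, SrcReg2, inserted before I.
BuildMI(*MBB, I, dl, TII.get(AVR::ADDRdRr), DstReg)
    .addReg(SrcReg1)
    .addReg(SrcReg2);
// An immediate form: DstReg2 = ANDI SrcReg3, 0x0f.
BuildMI(*MBB, I, dl, TII.get(AVR::ANDIRdK), DstReg2)
    .addReg(SrcReg3)
    .addImm(0x0f);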

decltype(auto) dyn_cast(const From &Val)

dyn_cast - Return the argument parameter cast to the specified type.

static const MCPhysReg RegList16Tiny[]

Definition AVRISelLowering.cpp:1162

constexpr bool has_single_bit(T Value) noexcept

static const MCPhysReg RegList8Tiny[]

Definition AVRISelLowering.cpp:1155

LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)

static void analyzeArguments(TargetLowering::CallLoweringInfo *CLI, const Function *F, const DataLayout *TD, const SmallVectorImpl< ArgT > &Args, SmallVectorImpl< CCValAssign > &ArgLocs, CCState &CCInfo, bool Tiny)

Analyze incoming and outgoing function arguments.

Definition AVRISelLowering.cpp:1176

constexpr bool isUInt(uint64_t x)

Checks if an unsigned integer fits into the given bit width.
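
For example (a sketch, not from this file):

#include "llvm/Support/MathExtras.h"
bool Fits    = llvm::isUInt<6>(63); // true: 63 fits in 6 unsigned bits
bool TooWide = llvm::isUInt<6>(64); // false: 64 does not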

class LLVM_GSL_OWNER SmallVector

Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).

bool isa(const From &Val)

isa - Return true if the parameter to the template is an instance of one of the template type arguments.

static const MCPhysReg RegList16AVR[]

Definition AVRISelLowering.cpp:1157

@ Sub

Subtraction of integers.

uint16_t MCPhysReg

An unsigned integer type large enough to represent all physical registers, but not necessarily virtual registers.

static unsigned getTotalArgumentsSizeInBytes(const SmallVectorImpl< ArgT > &Args)

Count the total number of bytes needed to pass or return these arguments.

Definition AVRISelLowering.cpp:1256

uint64_t alignTo(uint64_t Size, Align A)

Returns a multiple of A needed to store Size bytes.
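
For example (a sketch):

#include "llvm/Support/Alignment.h"
#include <cstdint>
uint64_t Padded = llvm::alignTo(3, llvm::Align(2)); // rounds 3 up to 4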

DWARFExpression::Operation Op

static AVRCC::CondCodes intCCToAVRCC(ISD::CondCode CC)

IntCCToAVRCC - Convert a DAG integer condition code to an AVR CC.

Definition AVRISelLowering.cpp:558
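
The mapping is a plain switch over ISD condition codes; a hedged sketch of a few cases follows (illustrative, not the file's exact code; see the definition at AVRISelLowering.cpp:558 for the full mapping).

AVRCC::CondCodes toAVRCC(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETEQ:  return AVRCC::COND_EQ; // equal
  case ISD::SETNE:  return AVRCC::COND_NE; // not equal
  case ISD::SETGE:  return AVRCC::COND_GE; // signed greater than or equal
  case ISD::SETUGE: return AVRCC::COND_SH; // unsigned same or higher
  default:          llvm_unreachable("unhandled condition code");
  }
}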

ArrayRef(const T &OneElt) -> ArrayRef< T >

decltype(auto) cast(const From &Val)

cast - Return the argument parameter cast to the specified type.
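
These casting utilities are used throughout DAG lowering; a sketch of the usual pattern, assuming an SDValue Op is in scope and may or may not wrap a ConstantSDNode.

if (isa<ConstantSDNode>(Op)) {                             // cheap type test
  uint64_t Imm = cast<ConstantSDNode>(Op)->getZExtValue(); // checked cast, asserts on mismatch
  (void)Imm;
}
if (const auto *C = dyn_cast<ConstantSDNode>(Op)) {        // test and cast in one step
  uint64_t Imm = C->getZExtValue();
  (void)Imm;
}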

static bool isCopyMulResult(MachineBasicBlock::iterator const &I)

Definition AVRISelLowering.cpp:2198

static void insertMultibyteShift(MachineInstr &MI, MachineBasicBlock *BB, MutableArrayRef< std::pair< Register, int > > Regs, ISD::NodeType Opc, int64_t ShiftAmt)

Definition AVRISelLowering.cpp:1856

static const MCPhysReg RegList8AVR[]

Registers for calling conventions, ordered in reverse as required by ABI.

Definition AVRISelLowering.cpp:1151

void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)

Implement std::swap in terms of BitVector swap.

This struct is a compact representation of a valid (non-zero power of two) alignment.

TypeSize getSizeInBits() const

Return the size of the specified value type in bits.

bool isVector() const

Return true if this is a vector value type.

LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const

This method returns an LLVM type corresponding to the specified EVT.

static LLVM_ABI MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)

Stack pointer relative access.

static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)

Return a MachinePointerInfo record that refers to the specified FrameIndex.
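
A short sketch of attaching stack pointer info to a DAG store, assuming DAG, dl, Chain, a value Val, a frame-index SDValue FIN for slot FI, and the MachineFunction MF are in scope.

SDValue Store = DAG.getStore(Chain, dl, Val, FIN,
                             MachinePointerInfo::getFixedStack(MF, FI));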

This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*vscale.

This contains information for each constraint that we are lowering.

This structure contains all information that is necessary for lowering calls.