LLVM: lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp Source File

//===-- RISCVLegalizerInfo.cpp ----------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the targeting of the Machinelegalizer class for
/// RISC-V.
///
//===----------------------------------------------------------------------===//

#include "RISCVLegalizerInfo.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/IntrinsicsRISCV.h"

using namespace llvm;
using namespace llvm::LegalityPredicates;
using namespace llvm::LegalizeMutations;
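
// The three typeIsLegal* helpers below each combine a typeInSet membership
// test with subtarget checks: vector instructions must be present, 64-bit
// elements additionally require hasVInstructionsI64(), and the
// single-element (fractional LMUL) types are only usable when ELEN is 64.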

static LegalityPredicate
typeIsLegalIntOrFPVec(unsigned TypeIdx,
                      std::initializer_list<LLT> IntOrFPVecTys,
                      const RISCVSubtarget &ST) {
  LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
    return ST.hasVInstructions() &&
           (Query.Types[TypeIdx].getScalarSizeInBits() != 64 ||
            ST.hasVInstructionsI64()) &&
           (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
            ST.getELen() == 64);
  };

  return all(typeInSet(TypeIdx, IntOrFPVecTys), P);
}


static LegalityPredicate
typeIsLegalBoolVec(unsigned TypeIdx, std::initializer_list<LLT> BoolVecTys,
                   const RISCVSubtarget &ST) {
  LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
    return ST.hasVInstructions() &&
           (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
            ST.getELen() == 64);
  };
  return all(typeInSet(TypeIdx, BoolVecTys), P);
}


static LegalityPredicate
typeIsLegalPtrVec(unsigned TypeIdx, std::initializer_list<LLT> PtrVecTys,
                  const RISCVSubtarget &ST) {
  LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
    return ST.hasVInstructions() &&
           (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
            ST.getELen() == 64) &&
           (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 16 ||
            Query.Types[TypeIdx].getScalarSizeInBits() == 32);
  };
  return all(typeInSet(TypeIdx, PtrVecTys), P);
}


RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
    : STI(ST), XLen(STI.getXLen()), sXLen(LLT::scalar(XLen)) {
  const LLT sDoubleXLen = LLT::scalar(2 * XLen);
  const LLT p0 = LLT::pointer(0, XLen);
  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT s128 = LLT::scalar(128);

  const LLT nxv1s1 = LLT::scalable_vector(1, s1);
  const LLT nxv2s1 = LLT::scalable_vector(2, s1);
  const LLT nxv4s1 = LLT::scalable_vector(4, s1);
  const LLT nxv8s1 = LLT::scalable_vector(8, s1);
  const LLT nxv16s1 = LLT::scalable_vector(16, s1);
  const LLT nxv32s1 = LLT::scalable_vector(32, s1);
  const LLT nxv64s1 = LLT::scalable_vector(64, s1);

  const LLT nxv1s8 = LLT::scalable_vector(1, s8);
  const LLT nxv2s8 = LLT::scalable_vector(2, s8);
  const LLT nxv4s8 = LLT::scalable_vector(4, s8);
  const LLT nxv8s8 = LLT::scalable_vector(8, s8);
  const LLT nxv16s8 = LLT::scalable_vector(16, s8);
  const LLT nxv32s8 = LLT::scalable_vector(32, s8);
  const LLT nxv64s8 = LLT::scalable_vector(64, s8);

  const LLT nxv1s16 = LLT::scalable_vector(1, s16);
  const LLT nxv2s16 = LLT::scalable_vector(2, s16);
  const LLT nxv4s16 = LLT::scalable_vector(4, s16);
  const LLT nxv8s16 = LLT::scalable_vector(8, s16);
  const LLT nxv16s16 = LLT::scalable_vector(16, s16);
  const LLT nxv32s16 = LLT::scalable_vector(32, s16);

  const LLT nxv1s32 = LLT::scalable_vector(1, s32);
  const LLT nxv2s32 = LLT::scalable_vector(2, s32);
  const LLT nxv4s32 = LLT::scalable_vector(4, s32);
  const LLT nxv8s32 = LLT::scalable_vector(8, s32);
  const LLT nxv16s32 = LLT::scalable_vector(16, s32);

  const LLT nxv1s64 = LLT::scalable_vector(1, s64);
  const LLT nxv2s64 = LLT::scalable_vector(2, s64);
  const LLT nxv4s64 = LLT::scalable_vector(4, s64);
  const LLT nxv8s64 = LLT::scalable_vector(8, s64);

  const LLT nxv1p0 = LLT::scalable_vector(1, p0);
  const LLT nxv2p0 = LLT::scalable_vector(2, p0);
  const LLT nxv4p0 = LLT::scalable_vector(4, p0);
  const LLT nxv8p0 = LLT::scalable_vector(8, p0);
  const LLT nxv16p0 = LLT::scalable_vector(16, p0);

  using namespace TargetOpcode;

  auto BoolVecTys = {nxv1s1, nxv2s1, nxv4s1, nxv8s1, nxv16s1, nxv32s1, nxv64s1};

  auto IntOrFPVecTys = {nxv1s8, nxv2s8, nxv4s8, nxv8s8, nxv16s8, nxv32s8,
                        nxv64s8, nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16,
                        nxv32s16, nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
                        nxv1s64, nxv2s64, nxv4s64, nxv8s64};

  auto PtrVecTys = {nxv1p0, nxv2p0, nxv4p0, nxv8p0, nxv16p0};
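
  // Most rule sets below follow one shape: mark what the subtarget can do
  // natively as legal (or custom, when a target node is needed), then
  // widen/clamp scalars toward sXLen, and finally fall back to lower() or a
  // libcall for everything else.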

137

  getActionDefinitionsBuilder({G_ADD, G_SUB})
      .legalFor({sXLen})
      .legalIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST))
      .customFor(ST.is64Bit(), {s32})
      .widenScalarToNextPow2(0)
      .clampScalar(0, sXLen, sXLen);

  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
      .legalFor({sXLen})
      .legalIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST))
      .widenScalarToNextPow2(0)
      .clampScalar(0, sXLen, sXLen);

  getActionDefinitionsBuilder(
      {G_UADDE, G_UADDO, G_USUBE, G_USUBO}).lower();


  getActionDefinitionsBuilder({G_SADDO, G_SSUBO})
      .minScalar(0, sXLen)
      .lower();


  getActionDefinitionsBuilder(
      {G_UADDSAT, G_SADDSAT, G_USUBSAT, G_SSUBSAT, G_SSHLSAT, G_USHLSAT})
      .lower();


  getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR})
      .legalFor({{sXLen, sXLen}})
      .customFor(ST.is64Bit(), {{s32, s32}})
      .widenScalarToNextPow2(0)
      .clampScalar(1, sXLen, sXLen)
      .clampScalar(0, sXLen, sXLen);
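
  // Several 32-bit-on-RV64 cases above and below are marked custom:
  // legalizeCustom() rewrites them into the sign-extending W-form opcodes
  // (G_SLLW, G_DIVW, ...) chosen by getRISCVWOpcode() near the end of this
  // file.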

  getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
      .legalFor({{s32, s16}})
      .legalFor(ST.is64Bit(), {{s64, s16}, {s64, s32}})
      .legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
                   typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)))
      .customIf(typeIsLegalBoolVec(1, BoolVecTys, ST))
      .maxScalar(0, sXLen);


  getActionDefinitionsBuilder(G_SEXT_INREG)
      .customFor({sXLen})
      .clampScalar(0, sXLen, sXLen)
      .lower();


  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    auto &MergeUnmergeActions = getActionDefinitionsBuilder(Op);
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
    if (XLen == 32 && ST.hasStdExtD()) {
      MergeUnmergeActions.legalIf(
          all(typeIs(BigTyIdx, s64), typeIs(LitTyIdx, s32)));
    }
    MergeUnmergeActions.widenScalarToNextPow2(LitTyIdx, XLen)
        .widenScalarToNextPow2(BigTyIdx, XLen)
        .clampScalar(LitTyIdx, sXLen, sXLen)
        .clampScalar(BigTyIdx, sXLen, sXLen);
  }


  getActionDefinitionsBuilder({G_FSHL, G_FSHR}).lower();

  getActionDefinitionsBuilder({G_ROTR, G_ROTL})
      .legalFor(ST.hasStdExtZbb() || ST.hasStdExtZbkb(), {{sXLen, sXLen}})
      .customFor(ST.is64Bit() && (ST.hasStdExtZbb() || ST.hasStdExtZbkb()),
                 {{s32, s32}})
      .lower();


  getActionDefinitionsBuilder(G_BITREVERSE).maxScalar(0, sXLen).lower();

  getActionDefinitionsBuilder(G_BITCAST).legalIf(
      all(LegalityPredicates::any(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
                                  typeIsLegalBoolVec(0, BoolVecTys, ST)),
          LegalityPredicates::any(typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST),
                                  typeIsLegalBoolVec(1, BoolVecTys, ST))));

  auto &BSWAPActions = getActionDefinitionsBuilder(G_BSWAP);
  if (ST.hasStdExtZbb() || ST.hasStdExtZbkb())
    BSWAPActions.legalFor({sXLen}).clampScalar(0, sXLen, sXLen);
  else
    BSWAPActions.maxScalar(0, sXLen).lower();


  auto &CountZerosActions = getActionDefinitionsBuilder({G_CTLZ, G_CTTZ});
  auto &CountZerosUndefActions =
      getActionDefinitionsBuilder({G_CTLZ_ZERO_UNDEF, G_CTTZ_ZERO_UNDEF});
  if (ST.hasStdExtZbb()) {
    CountZerosActions.legalFor({{sXLen, sXLen}})
        .customFor({{s32, s32}})
        .clampScalar(0, s32, sXLen)
        .widenScalarToNextPow2(0)
        .scalarSameSizeAs(1, 0);
  } else {
    CountZerosActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0).lower();
    CountZerosUndefActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0);
  }
  CountZerosUndefActions.lower();
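
  // Note: the *_ZERO_UNDEF variants always lower; with Zbb they become the
  // plain G_CTLZ/G_CTTZ handled above, and without Zbb both forms expand to
  // bit arithmetic.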


  auto &CTPOPActions = getActionDefinitionsBuilder(G_CTPOP);
  if (ST.hasStdExtZbb()) {
    CTPOPActions.legalFor({{sXLen, sXLen}})
        .clampScalar(0, sXLen, sXLen)
        .scalarSameSizeAs(1, 0);
  } else {
    CTPOPActions.widenScalarToNextPow2(0, 8)
        .clampScalar(0, s8, sXLen)
        .scalarSameSizeAs(1, 0)
        .lower();
  }


  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({p0})
      .legalFor(ST.is64Bit(), {s32})
      .customFor(ST.is64Bit(), {s64})
      .widenScalarToNextPow2(0)
      .clampScalar(0, sXLen, sXLen);


  getActionDefinitionsBuilder(G_FREEZE)
      .legalFor({s16, s32, p0})
      .legalFor(ST.is64Bit(), {s64})
      .legalIf(typeIsLegalBoolVec(0, BoolVecTys, ST))
      .legalIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST))
      .widenScalarToNextPow2(0)
      .clampScalar(0, s16, sXLen);


  getActionDefinitionsBuilder(
      {G_IMPLICIT_DEF, G_CONSTANT_FOLD_BARRIER})
      .legalFor({s32, sXLen, p0})
      .legalIf(typeIsLegalBoolVec(0, BoolVecTys, ST))
      .legalIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST))
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, sXLen);


  getActionDefinitionsBuilder(G_ICMP)
      .legalFor({{sXLen, sXLen}, {sXLen, p0}})
      .legalIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST),
                   typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)))
      .widenScalarOrEltToNextPow2OrMinSize(1, 8)
      .clampScalar(1, sXLen, sXLen)
      .clampScalar(0, sXLen, sXLen);


  getActionDefinitionsBuilder(G_SELECT)
      .legalFor({{s32, sXLen}, {p0, sXLen}})
      .legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
                   typeIsLegalBoolVec(1, BoolVecTys, ST)))
      .legalFor(XLen == 64 || ST.hasStdExtD(), {{s64, sXLen}})
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, (XLen == 64 || ST.hasStdExtD()) ? s64 : s32)
      .clampScalar(1, sXLen, sXLen);


  auto &LoadActions = getActionDefinitionsBuilder(G_LOAD);
  auto &StoreActions = getActionDefinitionsBuilder(G_STORE);
  auto &ExtLoadActions = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD});


  // Return the alignment needed for scalar memory ops. If unaligned scalar
  // mem is supported, we only require byte alignment.
  auto getScalarMemAlign = [&ST](unsigned Size) {
    return ST.enableUnalignedScalarMem() ? 8 : Size;
  };
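
  // Each legalForTypesWithMemDesc entry reads {reg type, pointer type, memory
  // type, minimum alignment in bits}; e.g. {s32, p0, s16, 16} is a 16-bit
  // memory access used through a 32-bit register value, requiring 16-bit
  // alignment unless unaligned scalar accesses are enabled.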

  LoadActions.legalForTypesWithMemDesc(
      {{s16, p0, s8, getScalarMemAlign(8)},
       {s32, p0, s8, getScalarMemAlign(8)},
       {s16, p0, s16, getScalarMemAlign(16)},
       {s32, p0, s16, getScalarMemAlign(16)},
       {s32, p0, s32, getScalarMemAlign(32)},
       {p0, p0, sXLen, getScalarMemAlign(XLen)}});
  StoreActions.legalForTypesWithMemDesc(
      {{s16, p0, s8, getScalarMemAlign(8)},
       {s32, p0, s8, getScalarMemAlign(8)},
       {s16, p0, s16, getScalarMemAlign(16)},
       {s32, p0, s16, getScalarMemAlign(16)},
       {s32, p0, s32, getScalarMemAlign(32)},
       {p0, p0, sXLen, getScalarMemAlign(XLen)}});
  ExtLoadActions.legalForTypesWithMemDesc(
      {{sXLen, p0, s8, getScalarMemAlign(8)},
       {sXLen, p0, s16, getScalarMemAlign(16)}});
  if (XLen == 64) {
    LoadActions.legalForTypesWithMemDesc(
        {{s64, p0, s8, getScalarMemAlign(8)},
         {s64, p0, s16, getScalarMemAlign(16)},
         {s64, p0, s32, getScalarMemAlign(32)},
         {s64, p0, s64, getScalarMemAlign(64)}});
    StoreActions.legalForTypesWithMemDesc(
        {{s64, p0, s8, getScalarMemAlign(8)},
         {s64, p0, s16, getScalarMemAlign(16)},
         {s64, p0, s32, getScalarMemAlign(32)},
         {s64, p0, s64, getScalarMemAlign(64)}});
    ExtLoadActions.legalForTypesWithMemDesc(
        {{s64, p0, s32, getScalarMemAlign(32)}});
  } else if (ST.hasStdExtD()) {
    LoadActions.legalForTypesWithMemDesc(
        {{s64, p0, s64, getScalarMemAlign(64)}});
    StoreActions.legalForTypesWithMemDesc(
        {{s64, p0, s64, getScalarMemAlign(64)}});
  }


  if (ST.hasVInstructions()) {
    LoadActions.legalForTypesWithMemDesc({{nxv2s8, p0, nxv2s8, 8},
                                          {nxv4s8, p0, nxv4s8, 8},
                                          {nxv8s8, p0, nxv8s8, 8},
                                          {nxv16s8, p0, nxv16s8, 8},
                                          {nxv32s8, p0, nxv32s8, 8},
                                          {nxv64s8, p0, nxv64s8, 8},
                                          {nxv2s16, p0, nxv2s16, 16},
                                          {nxv4s16, p0, nxv4s16, 16},
                                          {nxv8s16, p0, nxv8s16, 16},
                                          {nxv16s16, p0, nxv16s16, 16},
                                          {nxv32s16, p0, nxv32s16, 16},
                                          {nxv2s32, p0, nxv2s32, 32},
                                          {nxv4s32, p0, nxv4s32, 32},
                                          {nxv8s32, p0, nxv8s32, 32},
                                          {nxv16s32, p0, nxv16s32, 32}});
    StoreActions.legalForTypesWithMemDesc({{nxv2s8, p0, nxv2s8, 8},
                                           {nxv4s8, p0, nxv4s8, 8},
                                           {nxv8s8, p0, nxv8s8, 8},
                                           {nxv16s8, p0, nxv16s8, 8},
                                           {nxv32s8, p0, nxv32s8, 8},
                                           {nxv64s8, p0, nxv64s8, 8},
                                           {nxv2s16, p0, nxv2s16, 16},
                                           {nxv4s16, p0, nxv4s16, 16},
                                           {nxv8s16, p0, nxv8s16, 16},
                                           {nxv16s16, p0, nxv16s16, 16},
                                           {nxv32s16, p0, nxv32s16, 16},
                                           {nxv2s32, p0, nxv2s32, 32},
                                           {nxv4s32, p0, nxv4s32, 32},
                                           {nxv8s32, p0, nxv8s32, 32},
                                           {nxv16s32, p0, nxv16s32, 32}});

    if (ST.getELen() == 64) {
      LoadActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
                                            {nxv1s16, p0, nxv1s16, 16},
                                            {nxv1s32, p0, nxv1s32, 32}});
      StoreActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
                                             {nxv1s16, p0, nxv1s16, 16},
                                             {nxv1s32, p0, nxv1s32, 32}});
    }

    if (ST.hasVInstructionsI64()) {
      LoadActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
                                            {nxv2s64, p0, nxv2s64, 64},
                                            {nxv4s64, p0, nxv4s64, 64},
                                            {nxv8s64, p0, nxv8s64, 64}});
      StoreActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
                                             {nxv2s64, p0, nxv2s64, 64},
                                             {nxv4s64, p0, nxv4s64, 64},
                                             {nxv8s64, p0, nxv8s64, 64}});
    }

    // Use the custom lowering path for scalable vectors with non-standard
    // alignments.
    LoadActions.customIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST));
    StoreActions.customIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST));

    // Pointer vectors additionally require XLen-sized elements to be legal.
    if (XLen <= ST.getELen()) {
      LoadActions.customIf(typeIsLegalPtrVec(0, PtrVecTys, ST));
      StoreActions.customIf(typeIsLegalPtrVec(0, PtrVecTys, ST));
    }
  }


  LoadActions.widenScalarToNextPow2(0, 8)
      .lowerIfMemSizeNotByteSizePow2()
      .clampScalar(0, s16, sXLen)
      .lower();
  StoreActions
      .clampScalar(0, s16, sXLen)
      .lowerIfMemSizeNotByteSizePow2()
      .lower();

  ExtLoadActions.widenScalarToNextPow2(0).clampScalar(0, sXLen, sXLen).lower();


  getActionDefinitionsBuilder({G_PTR_ADD, G_PTRMASK}).legalFor({{p0, sXLen}});

  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalFor({{sXLen, p0}})
      .clampScalar(0, sXLen, sXLen);

  getActionDefinitionsBuilder(G_INTTOPTR)
      .legalFor({{p0, sXLen}})
      .clampScalar(1, sXLen, sXLen);

  getActionDefinitionsBuilder(G_BRCOND).legalFor({sXLen}).minScalar(0, sXLen);

  getActionDefinitionsBuilder(G_BRJT).customFor({{p0, sXLen}});

  getActionDefinitionsBuilder(G_BRINDIRECT).legalFor({p0});

  getActionDefinitionsBuilder(G_PHI)
      .legalFor({p0, s32, sXLen})
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, sXLen);

  getActionDefinitionsBuilder({G_GLOBAL_VALUE, G_JUMP_TABLE, G_CONSTANT_POOL})
      .legalFor({p0});


  if (ST.hasStdExtZmmul()) {
    getActionDefinitionsBuilder(G_MUL)
        .legalFor({sXLen})
        .widenScalarToNextPow2(0)
        .clampScalar(0, sXLen, sXLen);

    getActionDefinitionsBuilder({G_SMULH, G_UMULH})
        .legalFor({sXLen})
        .lower();

    getActionDefinitionsBuilder({G_SMULO, G_UMULO}).minScalar(0, sXLen).lower();
  } else {
    getActionDefinitionsBuilder(G_MUL)
        .libcallFor({sXLen, sDoubleXLen})
        .widenScalarToNextPow2(0)
        .clampScalar(0, sXLen, sDoubleXLen);

    getActionDefinitionsBuilder({G_SMULH, G_UMULH}).lowerFor({sXLen});

    getActionDefinitionsBuilder({G_SMULO, G_UMULO})
        .minScalar(0, sXLen)
        // Widen sXLen to sDoubleXLen so one libcall yields both the low bits
        // for the multiply result and the high bits for the overflow check.
        .widenScalarIf(typeIs(0, sXLen),
                       LegalizeMutations::changeTo(0, sDoubleXLen))
        .lower();
  }


  if (ST.hasStdExtM()) {
    getActionDefinitionsBuilder({G_SDIV, G_UDIV, G_UREM})
        .legalFor({sXLen})
        .customFor({s32})
        .libcallFor({sDoubleXLen})
        .clampScalar(0, s32, sDoubleXLen)
        .widenScalarToNextPow2(0);
    getActionDefinitionsBuilder(G_SREM)
        .legalFor({sXLen})
        .libcallFor({sDoubleXLen})
        .clampScalar(0, sXLen, sDoubleXLen)
        .widenScalarToNextPow2(0);
  } else {
    getActionDefinitionsBuilder({G_UDIV, G_SDIV, G_UREM, G_SREM})
        .libcallFor({sXLen, sDoubleXLen})
        .clampScalar(0, sXLen, sDoubleXLen)
        .widenScalarToNextPow2(0);
  }

  getActionDefinitionsBuilder({G_SDIVREM, G_UDIVREM}).lower();


  getActionDefinitionsBuilder(G_ABS)
      .customFor(ST.hasStdExtZbb(), {sXLen})
      .minScalar(ST.hasStdExtZbb(), 0, sXLen)
      .lower();

  getActionDefinitionsBuilder({G_ABDS, G_ABDU})
      .minScalar(ST.hasStdExtZbb(), 0, sXLen)
      .lower();

  getActionDefinitionsBuilder({G_UMAX, G_UMIN, G_SMAX, G_SMIN})
      .legalFor(ST.hasStdExtZbb(), {sXLen})
      .minScalar(ST.hasStdExtZbb(), 0, sXLen)
      .lower();

  getActionDefinitionsBuilder({G_SCMP, G_UCMP}).lower();

  getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});

  getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE, G_MEMSET}).libcall();

  getActionDefinitionsBuilder({G_DYN_STACKALLOC, G_STACKSAVE, G_STACKRESTORE})
      .lower();
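
  // Scalar FP rules below are gated on the FP extensions: F covers s32, D
  // covers s64, and Zfh covers s16. Unsupported types become libcalls, with
  // s128 libcalls only registered on RV64.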

  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMA, G_FSQRT,
                               G_FMAXNUM, G_FMINNUM, G_FMAXIMUMNUM,
                               G_FMINIMUMNUM})
      .legalFor(ST.hasStdExtF(), {s32})
      .legalFor(ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfh(), {s16})
      .libcallFor({s32, s64})
      .libcallFor(ST.is64Bit(), {s128});


  getActionDefinitionsBuilder({G_FNEG, G_FABS})
      .legalFor(ST.hasStdExtF(), {s32})
      .legalFor(ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfh(), {s16})
      .lowerFor({s32, s64, s128});

  getActionDefinitionsBuilder(G_FREM)
      .libcallFor({s32, s64})
      .libcallFor(ST.is64Bit(), {s128})
      .minScalar(0, s32)
      .scalarize(0);

  getActionDefinitionsBuilder(G_FCOPYSIGN)
      .legalFor(ST.hasStdExtF(), {{s32, s32}})
      .legalFor(ST.hasStdExtD(), {{s64, s64}, {s32, s64}, {s64, s32}})
      .legalFor(ST.hasStdExtZfh(), {{s16, s16}, {s16, s32}, {s32, s16}})
      .legalFor(ST.hasStdExtZfh() && ST.hasStdExtD(), {{s16, s64}, {s64, s16}})
      .lower();


  getActionDefinitionsBuilder(G_FPTRUNC)
      .legalFor(ST.hasStdExtD(), {{s32, s64}})
      .legalFor(ST.hasStdExtZfh(), {{s16, s32}})
      .legalFor(ST.hasStdExtZfh() && ST.hasStdExtD(), {{s16, s64}})
      .libcallFor({{s32, s64}})
      .libcallFor(ST.is64Bit(), {{s32, s128}, {s64, s128}});
  getActionDefinitionsBuilder(G_FPEXT)
      .legalFor(ST.hasStdExtD(), {{s64, s32}})
      .legalFor(ST.hasStdExtZfh(), {{s32, s16}})
      .legalFor(ST.hasStdExtZfh() && ST.hasStdExtD(), {{s64, s16}})
      .libcallFor({{s64, s32}})
      .libcallFor(ST.is64Bit(), {{s128, s32}, {s128, s64}});


  getActionDefinitionsBuilder(G_FCMP)
      .legalFor(ST.hasStdExtF(), {{sXLen, s32}})
      .legalFor(ST.hasStdExtD(), {{sXLen, s64}})
      .legalFor(ST.hasStdExtZfh(), {{sXLen, s16}})
      .clampScalar(0, sXLen, sXLen)
      .libcallFor({{sXLen, s32}, {sXLen, s64}})
      .libcallFor(ST.is64Bit(), {{sXLen, s128}});


  getActionDefinitionsBuilder(G_IS_FPCLASS)
      .customFor(ST.hasStdExtF(), {{s1, s32}})
      .customFor(ST.hasStdExtD(), {{s1, s64}})
      .customFor(ST.hasStdExtZfh(), {{s1, s16}})
      .lowerFor({{s1, s32}, {s1, s64}});


  getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor(ST.hasStdExtF(), {s32})
      .legalFor(ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfh(), {s16})
      .customFor(!ST.is64Bit(), {s32})
      .customFor(ST.is64Bit(), {s32, s64})
      .lowerFor({s64, s128});


  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
      .legalFor(ST.hasStdExtF(), {{sXLen, s32}})
      .legalFor(ST.hasStdExtD(), {{sXLen, s64}})
      .legalFor(ST.hasStdExtZfh(), {{sXLen, s16}})
      .customFor(ST.is64Bit() && ST.hasStdExtF(), {{s32, s32}})
      .customFor(ST.is64Bit() && ST.hasStdExtD(), {{s32, s64}})
      .customFor(ST.is64Bit() && ST.hasStdExtZfh(), {{s32, s16}})
      .widenScalarToNextPow2(0)
      .minScalar(0, s32)
      .libcallFor({{s32, s32}, {s64, s32}, {s32, s64}, {s64, s64}})
      .libcallFor(ST.is64Bit(), {{s32, s128}, {s64, s128}})
      .libcallFor(ST.is64Bit(), {{s128, s32}, {s128, s64}, {s128, s128}});


  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
      .legalFor(ST.hasStdExtF(), {{s32, sXLen}})
      .legalFor(ST.hasStdExtD(), {{s64, sXLen}})
      .legalFor(ST.hasStdExtZfh(), {{s16, sXLen}})
      .widenScalarToNextPow2(1)
      // Promote the integer operand to XLen when the result type is natively
      // supported.
      .widenScalarIf(
          [=, &ST](const LegalityQuery &Query) {
            return Query.Types[0].isScalar() && Query.Types[1].isScalar() &&
                   (Query.Types[1].getSizeInBits() < ST.getXLen()) &&
                   ((ST.hasStdExtF() && Query.Types[0].getSizeInBits() == 32) ||
                    (ST.hasStdExtD() && Query.Types[0].getSizeInBits() == 64) ||
                    (ST.hasStdExtZfh() &&
                     Query.Types[0].getSizeInBits() == 16));
          },
          LegalizeMutations::changeTo(1, sXLen))
      .minScalar(1, s32)
      .libcallFor({{s32, s32}, {s64, s32}, {s32, s64}, {s64, s64}})
      .libcallFor(ST.is64Bit(), {{s128, s32}, {s128, s64}})
      .libcallFor(ST.is64Bit(), {{s32, s128}, {s64, s128}, {s128, s128}});


  getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR, G_FRINT, G_FNEARBYINT,
                               G_INTRINSIC_TRUNC, G_INTRINSIC_ROUND,
                               G_INTRINSIC_ROUNDEVEN})
      .legalFor(ST.hasStdExtZfa(), {s32})
      .legalFor(ST.hasStdExtZfa() && ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfa() && ST.hasStdExtZfh(), {s16})
      .libcallFor({s32, s64})
      .libcallFor(ST.is64Bit(), {s128});

  getActionDefinitionsBuilder({G_FMAXIMUM, G_FMINIMUM})
      .legalFor(ST.hasStdExtZfa(), {s32})
      .legalFor(ST.hasStdExtZfa() && ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfa() && ST.hasStdExtZfh(), {s16});

  getActionDefinitionsBuilder({G_FCOS, G_FSIN, G_FTAN, G_FPOW, G_FLOG, G_FLOG2,
                               G_FLOG10, G_FEXP, G_FEXP2, G_FEXP10, G_FACOS,
                               G_FASIN, G_FATAN, G_FATAN2, G_FCOSH, G_FSINH,
                               G_FTANH, G_FMODF})
      .libcallFor({s32, s64})
      .libcallFor(ST.is64Bit(), {s128});
  getActionDefinitionsBuilder({G_FPOWI, G_FLDEXP})
      .libcallFor({{s32, s32}, {s64, s32}})
      .libcallFor(ST.is64Bit(), {{s128, s32}});


  getActionDefinitionsBuilder(G_VASTART).customFor({p0});

  // va_list is a pointer; clamp everything to sXLen and lower the
  // pointer/integer cases we expect to see.
  getActionDefinitionsBuilder(G_VAARG)
      .clampScalar(0, sXLen, sXLen)
      .lowerForCartesianProduct({sXLen, p0}, {p0});


  getActionDefinitionsBuilder(G_VSCALE)
      .clampScalar(0, sXLen, sXLen)
      .customFor({sXLen});


  auto &SplatActions =
      getActionDefinitionsBuilder(G_SPLAT_VECTOR)
          .legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
                       typeIs(1, sXLen)))
          .customIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST), typeIs(1, s1)));

  // Handle the case of s64-element vectors on RV32. If the subtarget has D
  // with vector support, an s64 splat is legal as is; otherwise, if the
  // subtarget has 64-bit vector elements at all, the splat is custom-lowered
  // by splitting the scalar into 32-bit halves.
  if (XLen == 32) {
    if (ST.hasVInstructionsF64() && ST.hasStdExtD())
      SplatActions.legalIf(all(
          typeInSet(0, {nxv1s64, nxv2s64, nxv4s64, nxv8s64}), typeIs(1, s64)));
    else if (ST.hasVInstructionsI64())
      SplatActions.customIf(all(
          typeInSet(0, {nxv1s64, nxv2s64, nxv4s64, nxv8s64}), typeIs(1, s64)));
  }

  SplatActions.clampScalar(1, sXLen, sXLen);


  // Mask vectors can't be slid by their i1 elements; when both types have at
  // least 8 elements, bitcast to the equivalent i8 vectors instead.
  LegalityPredicate ExtractSubvecBitcastPred =
      [=](const LegalityQuery &Query) {
        LLT DstTy = Query.Types[0];
        LLT SrcTy = Query.Types[1];
        return DstTy.getElementType() == LLT::scalar(1) &&
               DstTy.getElementCount().getKnownMinValue() >= 8 &&
               SrcTy.getElementCount().getKnownMinValue() >= 8;
      };
  getActionDefinitionsBuilder(G_EXTRACT_SUBVECTOR)
      .bitcastIf(
          all(typeIsLegalBoolVec(0, BoolVecTys, ST),
              typeIsLegalBoolVec(1, BoolVecTys, ST), ExtractSubvecBitcastPred),
          [=](const LegalityQuery &Query) {
            LLT CastTy = LLT::vector(
                Query.Types[0].getElementCount().divideCoefficientBy(8), 8);
            return std::pair(0, CastTy);
          })
      .customIf(LegalityPredicates::any(
          typeIsLegalBoolVec(0, BoolVecTys, ST),
          typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST)));


  getActionDefinitionsBuilder(G_INSERT_SUBVECTOR)
      .customIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST),
                    typeIsLegalBoolVec(1, BoolVecTys, ST)))
      .customIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
                    typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)));


  getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .lower();

  getActionDefinitionsBuilder({G_ATOMIC_CMPXCHG, G_ATOMICRMW_ADD})
      .legalFor(ST.hasStdExtA(), {{sXLen, p0}})
      .libcallFor(ST.hasStdExtA(), {{s8, p0}, {s16, p0}, {s32, p0}, {s64, p0}})
      .clampScalar(0, sXLen, sXLen);

  getActionDefinitionsBuilder(G_ATOMICRMW_SUB)
      .libcallFor(ST.hasStdExtA(), {{s8, p0}, {s16, p0}, {s32, p0}, {s64, p0}})
      .clampScalar(0, sXLen, sXLen)
      .lower();


  LegalityPredicate InsertVectorEltPred = [=](const LegalityQuery &Query) {
    LLT VecTy = Query.Types[0];
    LLT EltTy = Query.Types[1];
    return VecTy.getElementType() == EltTy;
  };

  getActionDefinitionsBuilder(G_INSERT_VECTOR_ELT)
      .legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
                   InsertVectorEltPred, typeIs(2, sXLen)))
      .legalIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST),
                   InsertVectorEltPred, typeIs(2, sXLen)));

  getLegacyLegalizerInfo().computeTables();
}
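
// Target intrinsics that reach the legalizer land here. RVV intrinsics with
// a scalar operand get that operand normalized into an XLen-wide register;
// vacopy is expanded to a load/store of the va_list pointer; the masked
// atomic intrinsics are already legal as-is.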

bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                           MachineInstr &MI) const {
  Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();

  if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
          RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID)) {
    if (II->hasScalarOperand() && II->IsFPIntrinsic) {
      // ScalarOperand is an argument index; machine operand 0 is the result
      // and operand 1 is the intrinsic ID, hence the +2.
      auto OldScalar = MI.getOperand(II->ScalarOperand + 2).getReg();

      if (MRI.getType(OldScalar).isScalar()) {
        if (MRI.getType(OldScalar).getSizeInBits() < sXLen.getSizeInBits()) {
          Helper.widenScalarSrc(MI, sXLen, II->ScalarOperand + 2,
                                TargetOpcode::G_ANYEXT);
        } else if (MRI.getType(OldScalar).getSizeInBits() >
                   sXLen.getSizeInBits()) {
          // Scalar operands wider than XLen are not handled here.
          return false;
        }
      }
    }
    return true;
  }

  switch (IntrinsicID) {
  default:
    return false;
  case Intrinsic::vacopy: {
    // vacopy arguments must be legal because of the intrinsic signature.
    // No need to check here.

    MachineFunction &MF = *MI.getMF();
    const DataLayout &DL = MIRBuilder.getDataLayout();
    LLVMContext &Ctx = MF.getFunction().getContext();

    Register DstLst = MI.getOperand(1).getReg();
    LLT PtrTy = MRI.getType(DstLst);

    // Load the source va_list.
    Align Alignment = DL.getABITypeAlign(getTypeForLLT(PtrTy, Ctx));
    MachineMemOperand *LoadMMO = MF.getMachineMemOperand(
        MachinePointerInfo(), MachineMemOperand::MOLoad, PtrTy, Alignment);
    auto Tmp = MIRBuilder.buildLoad(PtrTy, MI.getOperand(2), *LoadMMO);

    // Store it in the destination va_list.
    MachineMemOperand *StoreMMO = MF.getMachineMemOperand(
        MachinePointerInfo(), MachineMemOperand::MOStore, PtrTy, Alignment);
    MIRBuilder.buildStore(Tmp, DstLst, *StoreMMO);

    MI.eraseFromParent();
    return true;
  }
  case Intrinsic::riscv_masked_atomicrmw_add:
  case Intrinsic::riscv_masked_atomicrmw_sub:
  case Intrinsic::riscv_masked_cmpxchg:
    return true;
  }
}


bool RISCVLegalizerInfo::legalizeVAStart(MachineInstr &MI,
                                         MachineIRBuilder &MIRBuilder) const {
  // Store the address of the VarArgsFrameIndex slot into the memory location
  // pointed to by the G_VASTART operand.
  assert(MI.getOpcode() == TargetOpcode::G_VASTART);
  MachineFunction *MF = MI.getParent()->getParent();
  RISCVMachineFunctionInfo *FuncInfo = MF->getInfo<RISCVMachineFunctionInfo>();
  int FI = FuncInfo->getVarArgsFrameIndex();
  LLT AddrTy = MIRBuilder.getMRI()->getType(MI.getOperand(0).getReg());
  auto FINAddr = MIRBuilder.buildFrameIndex(AddrTy, FI);
  assert(MI.hasOneMemOperand());
  MIRBuilder.buildStore(FINAddr, MI.getOperand(0).getReg(),
                        *MI.memoperands()[0]);
  MI.eraseFromParent();
  return true;
}
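
// G_BRJT legalization: scale the index by the jump-table entry size, add it
// to the table base, load the entry (the load shape depends on the entry
// kind), and finish with an indirect branch.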

bool RISCVLegalizerInfo::legalizeBRJT(MachineInstr &MI,
                                      MachineIRBuilder &MIRBuilder) const {
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  auto &MF = *MI.getParent()->getParent();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  unsigned EntrySize = MJTI->getEntrySize(MF.getDataLayout());

  Register PtrReg = MI.getOperand(0).getReg();
  LLT PtrTy = MRI.getType(PtrReg);
  Register IndexReg = MI.getOperand(2).getReg();
  LLT IndexTy = MRI.getType(IndexReg);

  if (!isPowerOf2_32(EntrySize))
    return false;

  auto ShiftAmt = MIRBuilder.buildConstant(IndexTy, Log2_32(EntrySize));
  IndexReg = MIRBuilder.buildShl(IndexTy, IndexReg, ShiftAmt).getReg(0);

  auto Addr = MIRBuilder.buildPtrAdd(PtrTy, PtrReg, IndexReg);

  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getJumpTable(MF), MachineMemOperand::MOLoad,
      LLT::scalar(EntrySize * 8),
      Align(MJTI->getEntryAlignment(MF.getDataLayout())));

  Register TargetReg;
  switch (MJTI->getEntryKind()) {
  default:
    return false;
  case MachineJumpTableInfo::EK_LabelDifference32: {
    // For PIC, each entry is the address of the block minus the address of
    // the jump table, so the loaded value is added back to the table base.
    unsigned LoadOpc =
        STI.is64Bit() ? TargetOpcode::G_SEXTLOAD : TargetOpcode::G_LOAD;
    auto Load = MIRBuilder.buildLoadInstr(LoadOpc, IndexTy, Addr, *MMO);
    TargetReg = MIRBuilder.buildPtrAdd(PtrTy, PtrReg, Load).getReg(0);
    break;
  }
  case MachineJumpTableInfo::EK_Custom32: {
    auto Load = MIRBuilder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, IndexTy,
                                          Addr, *MMO);
    TargetReg = MIRBuilder.buildIntToPtr(PtrTy, Load).getReg(0);
    break;
  }
  case MachineJumpTableInfo::EK_BlockAddress:
    TargetReg = MIRBuilder.buildLoad(PtrTy, Addr, *MMO).getReg(0);
    break;
  }

  MIRBuilder.buildBrIndirect(TargetReg);

  MI.eraseFromParent();
  return true;
}


bool RISCVLegalizerInfo::shouldBeInConstantPool(const APInt &APImm,
                                                bool ShouldOptForSize) const {
  assert(APImm.getBitWidth() == 32 || APImm.getBitWidth() == 64);
  int64_t Imm = APImm.getSExtValue();
  // All simm32 constants should be handled by isel.
  if (isInt<32>(Imm))
    return false;

  // We only need to cost the immediate if constant pool lowering is enabled.
  if (!STI.useConstantPoolForLargeInts())
    return false;

  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, STI);
  if (Seq.size() <= STI.getMaxBuildIntsCost())
    return false;

  // The optimization below is disabled for opt size. If we're optimizing for
  // size, use a constant pool.
  if (ShouldOptForSize)
    return true;

  // Special case: see if the constant can be built as
  // (ADD (SLLI X, ShiftAmt), X), materializing the low bits once and reusing
  // them, at the cost of an extra temporary register.
  unsigned ShiftAmt, AddOpc;
  RISCVMatInt::InstSeq SeqLo =
      RISCVMatInt::generateTwoRegInstSeq(Imm, STI, ShiftAmt, AddOpc);
  return !(!SeqLo.empty() && (SeqLo.size() + 2) <= STI.getMaxBuildIntsCost());
}
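
// Example: a 64-bit constant like 0x1234567890abcdef needs a long
// LUI/ADDI/SLLI materialization chain, so once its RISCVMatInt sequence
// exceeds getMaxBuildIntsCost() it is loaded from the constant pool instead
// (when useConstantPoolForLargeInts() allows it); any simm32 value is always
// materialized inline.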


bool RISCVLegalizerInfo::legalizeVScale(MachineInstr &MI,
                                        MachineIRBuilder &MIB) const {
  const LLT XLenTy(STI.getXLenVT());
  Register Dst = MI.getOperand(0).getReg();

  // We define our scalable vector types for LMUL=1 to use a 64-bit known
  // minimum size, e.g. <vscale x 2 x i32>. VLENB is in bytes, so vscale is
  // VLENB / 8.
  static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
  if (STI.getRealMinVLen() < RISCV::RVVBitsPerBlock)
    // Support for VLEN==32 is incomplete.
    return false;

  // Pick the best shift/multiply form for the requested multiple of vscale.
  uint64_t Val = MI.getOperand(1).getCImm()->getZExtValue();
  if (isPowerOf2_64(Val)) {
    uint64_t Log2 = Log2_64(Val);
    if (Log2 < 3) {
      auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
      MIB.buildLShr(Dst, VLENB, MIB.buildConstant(XLenTy, 3 - Log2));
    } else if (Log2 > 3) {
      auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
      MIB.buildShl(Dst, VLENB, MIB.buildConstant(XLenTy, Log2 - 3));
    } else {
      MIB.buildInstr(RISCV::G_READ_VLENB, {Dst}, {});
    }
  } else if ((Val % 8) == 0) {
    // If the multiplier is a multiple of 8, scale it down to avoid needing
    // to shift the VLENB value.
    auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
    MIB.buildMul(Dst, VLENB, MIB.buildConstant(XLenTy, Val / 8));
  } else {
    auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
    auto VScale = MIB.buildLShr(XLenTy, VLENB, MIB.buildConstant(XLenTy, 3));
    MIB.buildMul(Dst, VScale, MIB.buildConstant(XLenTy, Val));
  }
  MI.eraseFromParent();
  return true;
}
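
// The vscale math above relies on RVVBitsPerBlock being 64: VLENB reads the
// vector register length in bytes, so vscale == VLENB / 8. A request for
// N * vscale therefore becomes a single shift of VLENB when N is a power of
// two, or VLENB * (N / 8) when N is a multiple of 8, avoiding the shift.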


// Custom-lower extensions from mask vectors by using a vselect either with 1
// for zero/any-extension or -1 for sign-extension:
//   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
// Note that any-extension is lowered identically to zero-extension.
bool RISCVLegalizerInfo::legalizeExt(MachineInstr &MI,
                                     MachineIRBuilder &MIB) const {

  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_ZEXT || Opc == TargetOpcode::G_SEXT ||
         Opc == TargetOpcode::G_ANYEXT);

  MachineRegisterInfo &MRI = *MIB.getMRI();
  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();

  LLT DstTy = MRI.getType(Dst);
  int64_t ExtTrueVal = Opc == TargetOpcode::G_SEXT ? -1 : 1;
  LLT DstEltTy = DstTy.getElementType();
  auto SplatZero = MIB.buildSplatVector(DstTy, MIB.buildConstant(DstEltTy, 0));
  auto SplatTrue =
      MIB.buildSplatVector(DstTy, MIB.buildConstant(DstEltTy, ExtTrueVal));
  MIB.buildSelect(Dst, Src, SplatTrue, SplatZero);

  MI.eraseFromParent();
  return true;
}


bool RISCVLegalizerInfo::legalizeLoadStore(MachineInstr &MI,
                                           LegalizerHelper &Helper,
                                           MachineIRBuilder &MIB) const {
  assert((isa<GLoad>(MI) || isa<GStore>(MI)) &&
         "Machine instructions must be Load/Store.");
  MachineRegisterInfo &MRI = *MIB.getMRI();
  MachineFunction *MF = MI.getMF();
  const DataLayout &DL = MIB.getDataLayout();
  LLVMContext &Ctx = MF->getFunction().getContext();

  Register DstReg = MI.getOperand(0).getReg();
  LLT DataTy = MRI.getType(DstReg);
  if (!DataTy.isVector())
    return false;

  if (!MI.hasOneMemOperand())
    return false;

  MachineMemOperand *MMO = *MI.memoperands_begin();

  const auto *TLI = STI.getTargetLowering();
  EVT VT = EVT::getEVT(getTypeForLLT(DataTy, Ctx));

  if (TLI->allowsMemoryAccessForAlignment(Ctx, DL, VT, *MMO))
    return true;

  unsigned EltSizeBits = DataTy.getScalarSizeInBits();
  assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
         "Unexpected unaligned RVV load type");

  // Calculate the new vector type with i8 elements.
  unsigned NumElements =
      DataTy.getElementCount().getKnownMinValue() * (EltSizeBits / 8);
  LLT NewDataTy = LLT::scalable_vector(NumElements, 8);

  Helper.bitcast(MI, 0, NewDataTy);

  return true;
}


/// Return the type of the mask suitable for masking the provided vector
/// type. This is simply an i1 element type vector of the same (possibly
/// scalable) length.
static LLT getMaskTypeFor(LLT VecTy) {
  assert(VecTy.isVector());
  ElementCount EC = VecTy.getElementCount();
  return LLT::vector(EC, LLT::scalar(1));
}

/// Creates an all ones mask suitable for masking a vector of type VecTy with
/// vector length VL.
static MachineInstrBuilder buildAllOnesMask(LLT VecTy, const SrcOp &VL,
                                            MachineIRBuilder &MIB,
                                            MachineRegisterInfo &MRI) {
  LLT MaskTy = getMaskTypeFor(VecTy);
  return MIB.buildInstr(RISCV::G_VMSET_VL, {MaskTy}, {VL});
}

/// Gets the two common "VL" operands: an all-ones mask and the vector length.
/// VecTy is a scalable vector type.
static std::pair<MachineInstrBuilder, MachineInstrBuilder>
buildDefaultVLOps(LLT VecTy, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) {
  assert(VecTy.isScalableVector() && "Expecting scalable container type");
  const RISCVSubtarget &STI = MIB.getMF().getSubtarget<RISCVSubtarget>();
  LLT XLenTy(STI.getXLenVT());
  auto VL = MIB.buildConstant(XLenTy, -1);
  auto Mask = buildAllOnesMask(VecTy, VL, MIB, MRI);
  return {Mask, VL};
}

static MachineInstrBuilder
buildSplatPartsS64WithVL(const DstOp &Dst, const SrcOp &Passthru, Register Lo,
                         Register Hi, const SrcOp &VL, MachineIRBuilder &MIB,
                         MachineRegisterInfo &MRI) {
  // There is no vmv.v.x that takes a register pair, so the two halves are
  // handed to a target node that later expands to a stack store plus a
  // stride-x0 vector load.
  return MIB.buildInstr(RISCV::G_SPLAT_VECTOR_SPLIT_I64_VL, {Dst},
                        {Passthru, Lo, Hi, VL});
}

static MachineInstrBuilder
buildSplatSplitS64WithVL(const DstOp &Dst, const SrcOp &Passthru,
                         const SrcOp &Scalar, const SrcOp &VL,
                         MachineIRBuilder &MIB, MachineRegisterInfo &MRI) {
  assert(Scalar.getLLTTy(MRI) == LLT::scalar(64) && "Unexpected VecTy!");
  auto Unmerge = MIB.buildUnmerge(LLT::scalar(32), Scalar);
  return buildSplatPartsS64WithVL(Dst, Passthru, Unmerge.getReg(0),
                                  Unmerge.getReg(1), VL, MIB, MRI);
}


// Lower splats of s1 types to G_ICMP. For each mask vector type, we have a
// legal equivalently-sized i8 type, so we can use that as a go-between.
// Splats of s1 types with a constant value can be legalized as VMSET_VL or
// VMCLR_VL.
bool RISCVLegalizerInfo::legalizeSplatVector(MachineInstr &MI,
                                             MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_SPLAT_VECTOR);

  MachineRegisterInfo &MRI = *MIB.getMRI();

  Register Dst = MI.getOperand(0).getReg();
  Register SplatVal = MI.getOperand(1).getReg();

  LLT VecTy = MRI.getType(Dst);
  LLT XLenTy(STI.getXLenVT());

  // Handle the case of s64-element vectors on RV32 by splitting the scalar
  // into two 32-bit halves.
  if (XLenTy.getSizeInBits() == 32 &&
      VecTy.getElementType().getSizeInBits() == 64) {
    auto [_, VL] = buildDefaultVLOps(MRI.getType(Dst), MIB, MRI);
    buildSplatSplitS64WithVL(Dst, MIB.buildUndef(VecTy), SplatVal, VL, MIB,
                             MRI);
    MI.eraseFromParent();
    return true;
  }

  // All-ones and all-zeros mask splats map directly to vmset/vmclr.
  MachineInstr &SplatValMI = *MRI.getVRegDef(SplatVal);
  if (isAllOnesOrAllOnesSplat(SplatValMI, MRI)) {
    auto VL = buildDefaultVLOps(VecTy, MIB, MRI).second;
    MIB.buildInstr(RISCV::G_VMSET_VL, {Dst}, {VL});
    MI.eraseFromParent();
    return true;
  }
  if (isNullOrNullSplat(SplatValMI, MRI)) {
    auto VL = buildDefaultVLOps(VecTy, MIB, MRI).second;
    MIB.buildInstr(RISCV::G_VMCLR_VL, {Dst}, {VL});
    MI.eraseFromParent();
    return true;
  }

  // Handle non-constant mask splat (i.e. not sure if it's all zeros or all
  // ones) by promoting it to an s8 splat and comparing against zero.
  const LLT InterEltTy = LLT::scalar(8);
  const LLT InterTy = VecTy.changeElementType(InterEltTy);
  auto ZExtSplatVal = MIB.buildZExt(InterEltTy, SplatVal);
  auto And =
      MIB.buildAnd(InterEltTy, ZExtSplatVal, MIB.buildConstant(InterEltTy, 1));
  auto LHS = MIB.buildSplatVector(InterTy, And);
  auto ZeroSplat =
      MIB.buildSplatVector(InterTy, MIB.buildConstant(InterEltTy, 0));
  MIB.buildICmp(CmpInst::Predicate::ICMP_NE, Dst, LHS, ZeroSplat);
  MI.eraseFromParent();
  return true;
}


static LLT getLMUL1Ty(LLT VecTy) {
  assert(VecTy.isScalableVector() &&
         VecTy.getElementType().getSizeInBits() <= 64 &&
         "Unexpected vector LLT");
  return LLT::scalable_vector(RISCV::RVVBitsPerBlock /
                                  VecTy.getElementType().getSizeInBits(),
                              VecTy.getElementType());
}
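
// getLMUL1Ty above returns the LMUL=1 container for an element type (64 bits
// per block), e.g. nxv4s16 for s16 elements. The subvector legalizers below
// use it to narrow slide operations to a single vector register when the
// source is a larger register group.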


bool RISCVLegalizerInfo::legalizeExtractSubvector(MachineInstr &MI,
                                                  MachineIRBuilder &MIB) const {
  GExtractSubvector &ES = cast<GExtractSubvector>(MI);

  MachineRegisterInfo &MRI = *MIB.getMRI();

  Register Dst = ES.getReg(0);
  Register Src = ES.getSrcVec();
  uint64_t Idx = ES.getIndexImm();

  // With an index of 0 this is a cast-like subvector, which can be performed
  // with subregisters directly.
  if (Idx == 0)
    return true;

  LLT LitTy = MRI.getType(Dst);
  LLT BigTy = MRI.getType(Src);

  if (LitTy.getElementType() == LLT::scalar(1)) {
    // We can't slide this mask vector down indexed by its i1 elements.
    // Extend to equivalent i8 vectors, extract there, and compare the result
    // back down to a mask.
    LLT ExtBigTy = BigTy.changeElementType(LLT::scalar(8));
    LLT ExtLitTy = LitTy.changeElementType(LLT::scalar(8));
    auto BigZExt = MIB.buildZExt(ExtBigTy, Src);
    auto ExtractZExt = MIB.buildExtractSubvector(ExtLitTy, BigZExt, Idx);
    auto SplatZero = MIB.buildSplatVector(
        ExtLitTy, MIB.buildConstant(ExtLitTy.getElementType(), 0));
    MIB.buildICmp(CmpInst::Predicate::ICMP_NE, Dst, ExtractZExt, SplatZero);
    MI.eraseFromParent();
    return true;
  }

  // Decompose the index into a subregister index plus a remainder.
  const RISCVRegisterInfo *TRI = STI.getRegisterInfo();

  auto Decompose =
      RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
          getMVTForLLT(BigTy), getMVTForLLT(LitTy), Idx, TRI);
  unsigned RemIdx = Decompose.second;

  // If the Idx has been completely eliminated then this is a subvector
  // extract which naturally aligns to a vector register. These can easily be
  // handled using subregister manipulation.
  if (RemIdx == 0)
    return true;

  // Else LitTy fits in one register or less and needs to be slid down; a
  // larger LitTy would have decomposed exactly onto a register boundary
  // above.

  // If the vector is a register-group type, first extract the subvector
  // equal to the nearest full vector register type.
  LLT InterLitTy = BigTy;
  Register Vec = Src;
  if (TypeSize::isKnownGT(BigTy.getSizeInBits(),
                          getLMUL1Ty(BigTy).getSizeInBits())) {
    // If BigTy has an LMUL > 1, then LitTy should have a smaller LMUL, and
    // we should have successfully decomposed the extract into a subregister.
    assert(Decompose.first != RISCV::NoSubRegister);
    InterLitTy = getLMUL1Ty(BigTy);
    Vec = MIB.buildExtractSubvector(InterLitTy, Src, Idx - RemIdx).getReg(0);
  }

  // Slide this vector register down by the desired number of elements in
  // order to place the desired subvector starting at element 0.
  const LLT XLenTy(STI.getXLenVT());
  auto SlidedownAmt = MIB.buildVScale(XLenTy, RemIdx);
  auto [Mask, VL] = buildDefaultVLOps(LitTy, MIB, MRI);
  uint64_t Policy = RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC;
  auto Slidedown = MIB.buildInstr(
      RISCV::G_VSLIDEDOWN_VL, {InterLitTy},
      {MIB.buildUndef(InterLitTy), Vec, SlidedownAmt, Mask, VL, Policy});

  // Now that the vector is in the right position, extract the final
  // subvector. This should resolve to a COPY.
  MIB.buildExtractSubvector(Dst, Slidedown, 0);

  MI.eraseFromParent();
  return true;
}


bool RISCVLegalizerInfo::legalizeInsertSubvector(MachineInstr &MI,
                                                 LegalizerHelper &Helper,
                                                 MachineIRBuilder &MIB) const {
  GInsertSubvector &IS = cast<GInsertSubvector>(MI);

  MachineRegisterInfo &MRI = *MIB.getMRI();

  Register Dst = IS.getReg(0);
  Register BigVec = IS.getBigVec();
  Register LitVec = IS.getSubVec();
  uint64_t Idx = IS.getIndexImm();

  LLT BigTy = MRI.getType(BigVec);
  LLT LitTy = MRI.getType(LitVec);

  if (Idx == 0 ||
      MRI.getVRegDef(BigVec)->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    return true;

  // We don't have the ability to slide mask vectors up indexed by their i1
  // elements; the smallest we can do is i8. Often we are able to bitcast to
  // equivalent i8 vectors.
  unsigned BigTyMinElts = BigTy.getElementCount().getKnownMinValue();
  unsigned LitTyMinElts = LitTy.getElementCount().getKnownMinValue();
  if (BigTy.getElementType() == LLT::scalar(1)) {
    if (BigTyMinElts >= 8 && LitTyMinElts >= 8)
      return Helper.bitcast(
          IS, 0,
          LLT::vector(BigTy.getElementCount().divideCoefficientBy(8), 8));

    // Otherwise take the slow path and widen the mask to i8 element
    // vectors, then do the insert there.
    LLT ExtBigTy = BigTy.changeElementType(LLT::scalar(8));
    return Helper.widenScalar(IS, 0, ExtBigTy);
  }

  const RISCVRegisterInfo *TRI = STI.getRegisterInfo();
  unsigned SubRegIdx, RemIdx;
  std::tie(SubRegIdx, RemIdx) =
      RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
          getMVTForLLT(BigTy), getMVTForLLT(LitTy), Idx, TRI);

  TypeSize VecRegSize = TypeSize::getScalable(RISCV::RVVBitsPerBlock);
  assert(isPowerOf2_64(
      STI.expandVScale(LitTy.getSizeInBits()).getKnownMinValue()));
  bool ExactlyVecRegSized =
      STI.expandVScale(LitTy.getSizeInBits())
          .isKnownMultipleOf(STI.expandVScale(VecRegSize));

  // If the subvector is register-aligned and a multiple of the register
  // size, the insert is just subregister manipulation.
  if (RemIdx == 0 && ExactlyVecRegSized)
    return true;

  // Otherwise build the insert at element 0 of a register-sized container,
  // slide it up into position, and, if needed, put the result back into the
  // enclosing register group.
  const LLT XLenTy(STI.getXLenVT());
  LLT InterLitTy = BigTy;
  Register AlignedExtract = BigVec;
  unsigned AlignedIdx = Idx - RemIdx;
  if (TypeSize::isKnownGT(BigTy.getSizeInBits(),
                          getLMUL1Ty(BigTy).getSizeInBits())) {
    InterLitTy = getLMUL1Ty(BigTy);
    // Extract a subvector equal to the nearest full vector register type.
    AlignedExtract =
        MIB.buildExtractSubvector(InterLitTy, BigVec, AlignedIdx).getReg(0);
  }

  auto Insert = MIB.buildInsertSubvector(InterLitTy,
                                         MIB.buildUndef(InterLitTy),
                                         LitVec, 0);

  auto [Mask, _] = buildDefaultVLOps(BigTy, MIB, MRI);
  auto VL = MIB.buildVScale(XLenTy, LitTy.getElementCount().getKnownMinValue());

  // If we're inserting into the lowest elements, use a tail undisturbed
  // vmv.v.v. Otherwise use a vslideup.
  MachineInstrBuilder Inserted;
  bool NeedInsertSubvec =
      TypeSize::isKnownGT(BigTy.getSizeInBits(), InterLitTy.getSizeInBits());
  Register InsertedDst =
      NeedInsertSubvec ? MRI.createGenericVirtualRegister(InterLitTy) : Dst;
  if (RemIdx == 0) {
    Inserted = MIB.buildInstr(RISCV::G_VMV_V_V_VL, {InsertedDst},
                              {AlignedExtract, Insert, VL});
  } else {
    auto SlideupAmt = MIB.buildVScale(XLenTy, RemIdx);
    // Construct the vector length corresponding to RemIdx + length(LitTy).
    VL = MIB.buildAdd(XLenTy, SlideupAmt, VL);
    // Use tail agnostic policy if we're inserting over InterLitTy's tail.
    ElementCount EndIndex =
        ElementCount::getScalable(RemIdx) + LitTy.getElementCount();
    uint64_t Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
    if (STI.expandVScale(EndIndex) ==
        STI.expandVScale(InterLitTy.getElementCount()))
      Policy = RISCVVType::TAIL_AGNOSTIC;

    Inserted =
        MIB.buildInstr(RISCV::G_VSLIDEUP_VL, {InsertedDst},
                       {AlignedExtract, Insert, SlideupAmt, Mask, VL, Policy});
  }

  // If required, insert this subvector back into the correct vector register.
  // This should resolve to an INSERT_SUBREG instruction.
  if (NeedInsertSubvec)
    MIB.buildInsertSubvector(Dst, BigVec, Inserted, AlignedIdx);

  MI.eraseFromParent();
  return true;
}


static unsigned getRISCVWOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected opcode");
  case TargetOpcode::G_ASHR:
    return RISCV::G_SRAW;
  case TargetOpcode::G_LSHR:
    return RISCV::G_SRLW;
  case TargetOpcode::G_SHL:
    return RISCV::G_SLLW;
  case TargetOpcode::G_SDIV:
    return RISCV::G_DIVW;
  case TargetOpcode::G_UDIV:
    return RISCV::G_DIVUW;
  case TargetOpcode::G_UREM:
    return RISCV::G_REMUW;
  case TargetOpcode::G_ROTL:
    return RISCV::G_ROLW;
  case TargetOpcode::G_ROTR:
    return RISCV::G_RORW;
  case TargetOpcode::G_CTLZ:
    return RISCV::G_CLZW;
  case TargetOpcode::G_CTTZ:
    return RISCV::G_CTZW;
  case TargetOpcode::G_FPTOSI:
    return RISCV::G_FCVT_W_RV64;
  case TargetOpcode::G_FPTOUI:
    return RISCV::G_FCVT_WU_RV64;
  }
}
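
// legalizeCustom dispatches every rule marked customFor/customIf above. The
// scalar cases mostly rewrite a 32-bit operation on RV64 into its W-form
// opcode after widening the operands; the rest fan out to the helpers
// defined earlier in this file.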

bool RISCVLegalizerInfo::legalizeCustom(
    LegalizerHelper &Helper, MachineInstr &MI,
    LostDebugLocObserver &LocObserver) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  switch (MI.getOpcode()) {
  default:
    // No idea what to do.
    return false;
  case TargetOpcode::G_ABS:
    return Helper.lowerAbsToMaxNeg(MI);
  case TargetOpcode::G_FCONSTANT: {
    const APFloat &FVal = MI.getOperand(1).getFPImm()->getValueAPF();

    // LLT does not distinguish FP from integer, so the FP immediate can be
    // materialized directly as an integer constant of the same width.
    Register DstReg = MI.getOperand(0).getReg();
    MIRBuilder.buildConstant(DstReg, FVal.bitcastToAPInt());

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_CONSTANT: {
    const Function &F = MI.getMF()->getFunction();

    bool ShouldOptForSize = F.hasOptSize();
    const ConstantInt *ConstVal = MI.getOperand(1).getCImm();
    if (!shouldBeInConstantPool(ConstVal->getValue(), ShouldOptForSize))
      return true;
    return Helper.lowerConstant(MI) ==
           LegalizerHelper::LegalizeResult::Legalized;
  }
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_ADD: {
    Helper.Observer.changingInstr(MI);
    Helper.widenScalarSrc(MI, sXLen, 1, TargetOpcode::G_ANYEXT);
    Helper.widenScalarSrc(MI, sXLen, 2, TargetOpcode::G_ANYEXT);

    Register DstALU = MRI.createGenericVirtualRegister(sXLen);

    MachineOperand &MO = MI.getOperand(0);
    MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
    auto DstSext = MIRBuilder.buildSExtInReg(sXLen, DstALU, 32);

    MIRBuilder.buildInstr(TargetOpcode::G_TRUNC, {MO}, {DstSext});
    MO.setReg(DstALU);

    Helper.Observer.changedInstr(MI);
    return true;
  }
  case TargetOpcode::G_SEXT_INREG: {
    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
    int64_t SizeInBits = MI.getOperand(2).getImm();
    // Source size of 32 is sext.w.
    if (DstTy.getSizeInBits() == 64 && SizeInBits == 32)
      return true;

    if (STI.hasStdExtZbb() && (SizeInBits == 8 || SizeInBits == 16))
      return true;

    return Helper.lower(MI, 0, LLT()) ==
           LegalizerHelper::LegalizeResult::Legalized;
  }
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_SHL: {
    if (getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI)) {
      // We don't need a custom node for shift by constant. Just widen the
      // source and the shift amount.
      unsigned ExtOpc = TargetOpcode::G_ANYEXT;
      if (MI.getOpcode() == TargetOpcode::G_ASHR)
        ExtOpc = TargetOpcode::G_SEXT;
      else if (MI.getOpcode() == TargetOpcode::G_LSHR)
        ExtOpc = TargetOpcode::G_ZEXT;

      Helper.Observer.changingInstr(MI);
      Helper.widenScalarSrc(MI, sXLen, 1, ExtOpc);
      Helper.widenScalarSrc(MI, sXLen, 2, TargetOpcode::G_ZEXT);
      Helper.widenScalarDst(MI, sXLen);
      Helper.Observer.changedInstr(MI);
      return true;
    }

    Helper.Observer.changingInstr(MI);
    Helper.widenScalarSrc(MI, sXLen, 1, TargetOpcode::G_ANYEXT);
    Helper.widenScalarSrc(MI, sXLen, 2, TargetOpcode::G_ANYEXT);
    Helper.widenScalarDst(MI, sXLen);
    MI.setDesc(MIRBuilder.getTII().get(getRISCVWOpcode(MI.getOpcode())));
    Helper.Observer.changedInstr(MI);
    return true;
  }
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_ROTL:
  case TargetOpcode::G_ROTR: {
    Helper.Observer.changingInstr(MI);
    Helper.widenScalarSrc(MI, sXLen, 1, TargetOpcode::G_ANYEXT);
    Helper.widenScalarSrc(MI, sXLen, 2, TargetOpcode::G_ANYEXT);
    Helper.widenScalarDst(MI, sXLen);
    MI.setDesc(MIRBuilder.getTII().get(getRISCVWOpcode(MI.getOpcode())));
    Helper.Observer.changedInstr(MI);
    return true;
  }
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTTZ: {
    Helper.Observer.changingInstr(MI);
    Helper.widenScalarSrc(MI, sXLen, 1, TargetOpcode::G_ANYEXT);
    Helper.widenScalarDst(MI, sXLen);
    MI.setDesc(MIRBuilder.getTII().get(getRISCVWOpcode(MI.getOpcode())));
    Helper.Observer.changedInstr(MI);
    return true;
  }
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI: {
    Helper.Observer.changingInstr(MI);
    Helper.widenScalarDst(MI, sXLen);
    MI.setDesc(MIRBuilder.getTII().get(getRISCVWOpcode(MI.getOpcode())));
    // The W-form conversions take a static rounding mode operand; use round
    // toward zero to match fptosi/fptoui semantics.
    MI.addOperand(MachineOperand::CreateImm(RISCVFPRndMode::RTZ));
    Helper.Observer.changedInstr(MI);
    return true;
  }
  case TargetOpcode::G_IS_FPCLASS: {
    Register GISFPCLASS = MI.getOperand(0).getReg();
    Register Src = MI.getOperand(1).getReg();
    const MachineOperand &ImmOp = MI.getOperand(2);
    MachineIRBuilder MIB(MI);

    // Turn LLVM IR's floating point classes to that in RISC-V,
    // by simply rotating the 10-bit immediate right by two bits.
    APInt GFpClassImm(10, static_cast<uint64_t>(ImmOp.getImm()));
    auto FClassMask = MIB.buildConstant(sXLen, GFpClassImm.rotr(2).zext(XLen));
    auto ConstZero = MIB.buildConstant(sXLen, 0);

    auto GFClass = MIB.buildInstr(RISCV::G_FCLASS, {sXLen}, {Src});
    auto And = MIB.buildAnd(sXLen, GFClass, FClassMask);
    MIB.buildICmp(CmpInst::ICMP_NE, GISFPCLASS, And, ConstZero);

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_BRJT:
    return legalizeBRJT(MI, MIRBuilder);
  case TargetOpcode::G_VASTART:
    return legalizeVAStart(MI, MIRBuilder);
  case TargetOpcode::G_VSCALE:
    return legalizeVScale(MI, MIRBuilder);
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ANYEXT:
    return legalizeExt(MI, MIRBuilder);
  case TargetOpcode::G_SPLAT_VECTOR:
    return legalizeSplatVector(MI, MIRBuilder);
  case TargetOpcode::G_EXTRACT_SUBVECTOR:
    return legalizeExtractSubvector(MI, MIRBuilder);
  case TargetOpcode::G_INSERT_SUBVECTOR:
    return legalizeInsertSubvector(MI, Helper, MIRBuilder);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
    return legalizeLoadStore(MI, Helper, MIRBuilder);
  }

  llvm_unreachable("expected switch to return");
}
