LLVM: lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp Source File
//===-- RISCVLegalizerInfo.cpp ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the Machinelegalizer class for
/// RISC-V.
//===----------------------------------------------------------------------===//

#include "RISCVLegalizerInfo.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"

using namespace llvm;
using namespace llvm::LegalityPredicates;
using namespace llvm::LegalizeMutations;

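// Note on the helpers below: each pairs a whitelist of types (built in the
// constructor) with a subtarget predicate, so a scalable vector type is only
// reported legal when the V extension is actually available, s64 elements
// additionally require Zve64*, and the fractional nxv1 forms require
// ELEN == 64. Naming reminder: nxv4s8 is a scalable vector of
// (vscale x 4) s8 elements.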
static LegalityPredicate
typeIsLegalIntOrFPVec(unsigned TypeIdx,
                      std::initializer_list<LLT> IntOrFPVecTys,
                      const RISCVSubtarget &ST) {
  LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
    return ST.hasVInstructions() &&
           (Query.Types[TypeIdx].getScalarSizeInBits() != 64 ||
            ST.hasVInstructionsI64()) &&
           (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
            ST.getELen() == 64);
  };

  return all(typeInSet(TypeIdx, IntOrFPVecTys), P);
}

static LegalityPredicate
typeIsLegalBoolVec(unsigned TypeIdx, std::initializer_list<LLT> BoolVecTys,
                   const RISCVSubtarget &ST) {
  LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
    return ST.hasVInstructions() &&
           (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
            ST.getELen() == 64);
  };
  return all(typeInSet(TypeIdx, BoolVecTys), P);
}

static LegalityPredicate
typeIsLegalPtrVec(unsigned TypeIdx, std::initializer_list<LLT> PtrVecTys,
                  const RISCVSubtarget &ST) {
  LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
    return ST.hasVInstructions() &&
           (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
            ST.getELen() == 64) &&
           (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 16 ||
            Query.Types[TypeIdx].getScalarSizeInBits() == 32);
  };
  return all(typeInSet(TypeIdx, PtrVecTys), P);
}

RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
    : STI(ST), XLen(STI.getXLen()), sXLen(LLT::scalar(XLen)) {
  const LLT sDoubleXLen = LLT::scalar(2 * XLen);
  const LLT p0 = LLT::pointer(0, XLen);
  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT s128 = LLT::scalar(128);

  // The scalable-vector shorthands used below all follow the same pattern,
  // e.g.
  //   const LLT nxv4s8 = LLT::scalable_vector(4, s8);
  // with one definition per element count for s1, s8, s16, s32, s64 and p0
  // elements.

  auto BoolVecTys = {nxv1s1, nxv2s1, nxv4s1, nxv8s1, nxv16s1, nxv32s1, nxv64s1};

  auto IntOrFPVecTys = {nxv1s8, nxv2s8, nxv4s8, nxv8s8, nxv16s8, nxv32s8,
                        nxv64s8, nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16,
                        nxv32s16, nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
                        nxv1s64, nxv2s64, nxv4s64, nxv8s64};

  auto PtrVecTys = {nxv1p0, nxv2p0, nxv4p0, nxv8p0, nxv16p0};
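  // These whitelists span everything from fractional LMUL up to LMUL 8 for
  // each element width up to ELEN == 64; whether a particular entry is really
  // legal on this subtarget is decided by the typeIsLegal* predicates above,
  // not by the lists alone.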

  getActionDefinitionsBuilder({G_ADD, G_SUB})
      .legalFor({sXLen})
      .legalIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST))
      .customFor(ST.is64Bit(), {s32})
      .widenScalarToNextPow2(0)
      .clampScalar(0, sXLen, sXLen);

  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
      .legalFor({sXLen})
      .legalIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST))
      .widenScalarToNextPow2(0)
      .clampScalar(0, sXLen, sXLen);

  getActionDefinitionsBuilder(
      {G_UADDE, G_UADDO, G_USUBE, G_USUBO}).lower();

  getActionDefinitionsBuilder({G_SADDO, G_SSUBO})
      .minScalar(0, sXLen)
      .lower();

  // TODO: Use Vector Single-Width Saturating Instructions for vector types.
  getActionDefinitionsBuilder(
      {G_UADDSAT, G_SADDSAT, G_USUBSAT, G_SSUBSAT, G_SSHLSAT, G_USHLSAT})
      .lower();

  getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR})
      .legalFor({{sXLen, sXLen}})
      .customFor(ST.is64Bit(), {{s32, s32}})
      .widenScalarToNextPow2(0)
      .clampScalar(1, sXLen, sXLen)
      .clampScalar(0, sXLen, sXLen);

  getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
      .legalFor({{s32, s16}})
      .legalFor(ST.is64Bit(), {{s64, s16}, {s64, s32}})
      .customIf(typeIsLegalBoolVec(1, BoolVecTys, ST))
      .maxScalar(0, sXLen);

  getActionDefinitionsBuilder(G_SEXT_INREG)
      .customFor({sXLen})
      .clampScalar(0, sXLen, sXLen)
      .lower();

  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    auto &MergeUnmergeActions = getActionDefinitionsBuilder(Op);
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
    if (XLen == 32 && ST.hasStdExtD()) {
      MergeUnmergeActions.legalIf(
          all(typeIs(BigTyIdx, s64), typeIs(LitTyIdx, s32)));
    }
    MergeUnmergeActions.widenScalarToNextPow2(LitTyIdx, XLen)
        .widenScalarToNextPow2(BigTyIdx, XLen)
        .clampScalar(LitTyIdx, sXLen, sXLen)
        .clampScalar(BigTyIdx, sXLen, sXLen);
  }

  getActionDefinitionsBuilder({G_FSHL, G_FSHR}).lower();

  getActionDefinitionsBuilder({G_ROTR, G_ROTL})
      .legalFor(ST.hasStdExtZbb() || ST.hasStdExtZbkb(), {{sXLen, sXLen}})
      .customFor(ST.is64Bit() && (ST.hasStdExtZbb() || ST.hasStdExtZbkb()),
                 {{s32, s32}})
      .lower();

  getActionDefinitionsBuilder(G_BITREVERSE).maxScalar(0, sXLen).lower();

  getActionDefinitionsBuilder(G_BITCAST).legalIf(
      all(LegalityPredicates::any(
              typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
              typeIsLegalBoolVec(0, BoolVecTys, ST)),
          LegalityPredicates::any(
              typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST),
              typeIsLegalBoolVec(1, BoolVecTys, ST))));

  auto &BSWAPActions = getActionDefinitionsBuilder(G_BSWAP);
  if (ST.hasStdExtZbb() || ST.hasStdExtZbkb())
    BSWAPActions.legalFor({sXLen}).clampScalar(0, sXLen, sXLen);
  else
    BSWAPActions.maxScalar(0, sXLen).lower();

  auto &CountZerosActions = getActionDefinitionsBuilder({G_CTLZ, G_CTTZ});
  auto &CountZerosUndefActions =
      getActionDefinitionsBuilder({G_CTLZ_ZERO_UNDEF, G_CTTZ_ZERO_UNDEF});
  if (ST.hasStdExtZbb()) {
    CountZerosActions.legalFor({{sXLen, sXLen}})
        .customFor({{s32, s32}})
        .clampScalar(0, s32, sXLen)
        .widenScalarToNextPow2(0)
        .scalarSameSizeAs(1, 0);
  } else {
    CountZerosActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0).lower();
    CountZerosUndefActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0);
  }
  CountZerosUndefActions.lower();

  auto &CTPOPActions = getActionDefinitionsBuilder(G_CTPOP);
  if (ST.hasStdExtZbb()) {
    CTPOPActions.legalFor({{sXLen, sXLen}})
        .clampScalar(0, sXLen, sXLen)
        .scalarSameSizeAs(1, 0);
  } else {
    CTPOPActions.widenScalarToNextPow2(0, 8)
        .clampScalar(0, s8, sXLen)
        .scalarSameSizeAs(1, 0)
        .lower();
  }

  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({p0})
      .legalFor(!ST.is64Bit(), {s32})
      .customFor(ST.is64Bit(), {s64})
      .widenScalarToNextPow2(0)
      .clampScalar(0, sXLen, sXLen);

  getActionDefinitionsBuilder(G_FREEZE)
      .legalFor({s16, s32, p0})
      .legalFor(ST.is64Bit(), {s64})
      .legalIf(typeIsLegalBoolVec(0, BoolVecTys, ST))
      .legalIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST))
      .widenScalarToNextPow2(0)
      .clampScalar(0, s16, sXLen);

  getActionDefinitionsBuilder(
      {G_IMPLICIT_DEF, G_CONSTANT_FOLD_BARRIER})
      .legalFor({s32, sXLen, p0})
      .legalIf(typeIsLegalBoolVec(0, BoolVecTys, ST))
      .legalIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST))
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, sXLen);

  getActionDefinitionsBuilder(G_ICMP)
      .legalFor({{sXLen, sXLen}, {sXLen, p0}})
      .legalIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST),
                   typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)))
      .widenScalarOrEltToNextPow2OrMinSize(1, 8)
      .clampScalar(1, sXLen, sXLen)
      .clampScalar(0, sXLen, sXLen);

  getActionDefinitionsBuilder(G_SELECT)
      .legalFor({{s32, sXLen}, {p0, sXLen}})
      .legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
                   typeIsLegalBoolVec(1, BoolVecTys, ST)))
      .legalFor(XLen == 64 || ST.hasStdExtD(), {{s64, sXLen}})
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, (XLen == 64 || ST.hasStdExtD()) ? s64 : s32)
      .clampScalar(1, sXLen, sXLen);

  auto &LoadActions = getActionDefinitionsBuilder(G_LOAD);
  auto &StoreActions = getActionDefinitionsBuilder(G_STORE);
  auto &ExtLoadActions = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD});

  // Return the alignment needed for scalar memory ops. If unaligned scalar
  // mem is supported, we only require byte alignment; otherwise the access
  // must be naturally aligned.
  auto getScalarMemAlign = [&ST](unsigned Size) {
    return ST.enableUnalignedScalarMem() ? 8 : Size;
  };

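  // Example: with unaligned scalar access enabled, the s32 entries below are
  // tagged getScalarMemAlign(32) == 8, i.e. byte-aligned s32 loads and stores
  // stay legal; otherwise they require their natural 32-bit alignment.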
  LoadActions.legalForTypesWithMemDesc(
      {{s16, p0, s8, getScalarMemAlign(8)},
       {s32, p0, s8, getScalarMemAlign(8)},
       {s16, p0, s16, getScalarMemAlign(16)},
       {s32, p0, s16, getScalarMemAlign(16)},
       {s32, p0, s32, getScalarMemAlign(32)},
       {p0, p0, sXLen, getScalarMemAlign(XLen)}});
  StoreActions.legalForTypesWithMemDesc(
      {{s16, p0, s8, getScalarMemAlign(8)},
       {s32, p0, s8, getScalarMemAlign(8)},
       {s16, p0, s16, getScalarMemAlign(16)},
       {s32, p0, s16, getScalarMemAlign(16)},
       {s32, p0, s32, getScalarMemAlign(32)},
       {p0, p0, sXLen, getScalarMemAlign(XLen)}});
  ExtLoadActions.legalForTypesWithMemDesc(
      {{sXLen, p0, s8, getScalarMemAlign(8)},
       {sXLen, p0, s16, getScalarMemAlign(16)}});
  if (XLen == 64) {
    LoadActions.legalForTypesWithMemDesc(
        {{s64, p0, s8, getScalarMemAlign(8)},
         {s64, p0, s16, getScalarMemAlign(16)},
         {s64, p0, s32, getScalarMemAlign(32)},
         {s64, p0, s64, getScalarMemAlign(64)}});
    StoreActions.legalForTypesWithMemDesc(
        {{s64, p0, s8, getScalarMemAlign(8)},
         {s64, p0, s16, getScalarMemAlign(16)},
         {s64, p0, s32, getScalarMemAlign(32)},
         {s64, p0, s64, getScalarMemAlign(64)}});
    ExtLoadActions.legalForTypesWithMemDesc(
        {{s64, p0, s32, getScalarMemAlign(32)}});
  } else if (ST.hasStdExtD()) {
    LoadActions.legalForTypesWithMemDesc(
        {{s64, p0, s64, getScalarMemAlign(64)}});
    StoreActions.legalForTypesWithMemDesc(
        {{s64, p0, s64, getScalarMemAlign(64)}});
  }

  if (ST.hasVInstructions()) {
    LoadActions.legalForTypesWithMemDesc({{nxv2s8, p0, nxv2s8, 8},
                                          {nxv4s8, p0, nxv4s8, 8},
                                          {nxv8s8, p0, nxv8s8, 8},
                                          {nxv16s8, p0, nxv16s8, 8},
                                          {nxv32s8, p0, nxv32s8, 8},
                                          {nxv64s8, p0, nxv64s8, 8},
                                          {nxv2s16, p0, nxv2s16, 16},
                                          {nxv4s16, p0, nxv4s16, 16},
                                          {nxv8s16, p0, nxv8s16, 16},
                                          {nxv16s16, p0, nxv16s16, 16},
                                          {nxv32s16, p0, nxv32s16, 16},
                                          {nxv2s32, p0, nxv2s32, 32},
                                          {nxv4s32, p0, nxv4s32, 32},
                                          {nxv8s32, p0, nxv8s32, 32},
                                          {nxv16s32, p0, nxv16s32, 32}});
    StoreActions.legalForTypesWithMemDesc({{nxv2s8, p0, nxv2s8, 8},
                                           {nxv4s8, p0, nxv4s8, 8},
                                           {nxv8s8, p0, nxv8s8, 8},
                                           {nxv16s8, p0, nxv16s8, 8},
                                           {nxv32s8, p0, nxv32s8, 8},
                                           {nxv64s8, p0, nxv64s8, 8},
                                           {nxv2s16, p0, nxv2s16, 16},
                                           {nxv4s16, p0, nxv4s16, 16},
                                           {nxv8s16, p0, nxv8s16, 16},
                                           {nxv16s16, p0, nxv16s16, 16},
                                           {nxv32s16, p0, nxv32s16, 16},
                                           {nxv2s32, p0, nxv2s32, 32},
                                           {nxv4s32, p0, nxv4s32, 32},
                                           {nxv8s32, p0, nxv8s32, 32},
                                           {nxv16s32, p0, nxv16s32, 32}});

    if (ST.getELen() == 64) {
      LoadActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
                                            {nxv1s16, p0, nxv1s16, 16},
                                            {nxv1s32, p0, nxv1s32, 32}});
      StoreActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
                                             {nxv1s16, p0, nxv1s16, 16},
                                             {nxv1s32, p0, nxv1s32, 32}});
    }

    if (ST.hasVInstructionsI64()) {
      LoadActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
                                            {nxv2s64, p0, nxv2s64, 64},
                                            {nxv4s64, p0, nxv4s64, 64},
                                            {nxv8s64, p0, nxv8s64, 64}});
      StoreActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
                                             {nxv2s64, p0, nxv2s64, 64},
                                             {nxv4s64, p0, nxv4s64, 64},
                                             {nxv8s64, p0, nxv8s64, 64}});
    }

    // These types are legal for vle/vse as long as the access is sufficiently
    // aligned; otherwise take the custom-lowering path, which bitcasts to a
    // byte-element vector (see legalizeLoadStore below).
    LoadActions.customIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST));
    StoreActions.customIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST));

    // Pointer vectors additionally require that XLen-sized elements be legal.
    if (XLen <= ST.getELen()) {
      LoadActions.legalIf(typeIsLegalPtrVec(0, PtrVecTys, ST));
      StoreActions.legalIf(typeIsLegalPtrVec(0, PtrVecTys, ST));
    }
  }

  LoadActions.widenScalarToNextPow2(0, 8)
      .lowerIfMemSizeNotByteSizePow2()
      .clampScalar(0, s16, sXLen)
      .lower();
  StoreActions
      .clampScalar(0, s16, sXLen)
      .lowerIfMemSizeNotByteSizePow2()
      .lower();

  ExtLoadActions.widenScalarToNextPow2(0).clampScalar(0, sXLen, sXLen).lower();

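  // Extending loads of s8/s16 (and s32 on RV64) map directly onto
  // lb/lbu/lh/lhu/lw; everything else is first widened to sXLen and then
  // lowered to a plain G_LOAD followed by an explicit extension.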
  getActionDefinitionsBuilder({G_PTR_ADD, G_PTRMASK}).legalFor({{p0, sXLen}});

  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalFor({{sXLen, p0}})
      .clampScalar(0, sXLen, sXLen);

  getActionDefinitionsBuilder(G_INTTOPTR)
      .legalFor({{p0, sXLen}})
      .clampScalar(1, sXLen, sXLen);

  getActionDefinitionsBuilder(G_BRCOND).legalFor({sXLen}).minScalar(0, sXLen);

  getActionDefinitionsBuilder(G_BRJT).customFor({{p0, sXLen}});

  getActionDefinitionsBuilder(G_BRINDIRECT).legalFor({p0});

  getActionDefinitionsBuilder(G_PHI)
      .legalFor({p0, s32, sXLen})
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, sXLen);

  getActionDefinitionsBuilder({G_GLOBAL_VALUE, G_JUMP_TABLE, G_CONSTANT_POOL})
      .legalFor({p0});

  if (ST.hasStdExtZmmul()) {
    getActionDefinitionsBuilder(G_MUL)
        .legalFor({sXLen})
        .widenScalarToNextPow2(0)
        .clampScalar(0, sXLen, sXLen);

    getActionDefinitionsBuilder({G_SMULH, G_UMULH})
        .legalFor({sXLen})
        .lower();

    getActionDefinitionsBuilder({G_SMULO, G_UMULO}).minScalar(0, sXLen).lower();
  } else {
    getActionDefinitionsBuilder(G_MUL)
        .libcallFor({sXLen, sDoubleXLen})
        .widenScalarToNextPow2(0)
        .clampScalar(0, sXLen, sDoubleXLen);

    getActionDefinitionsBuilder({G_SMULH, G_UMULH}).lowerFor({sXLen});

    getActionDefinitionsBuilder({G_SMULO, G_UMULO})
        .minScalar(0, sXLen)
        // Widen sXLen to sDoubleXLen so that a single (libcall) multiply
        // yields the full product and the overflow check can be performed on
        // its high half.
        .widenScalarIf(typeIs(0, sXLen),
                       changeTo(0, sDoubleXLen))
        .lower();
  }

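  // Without Zmmul, the interesting case is G_SMULO/G_UMULO: the operation is
  // widened to sDoubleXLen so a single multiply libcall produces the full
  // product, and the overflow bit can then be derived from its high half.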
  if (ST.hasStdExtM()) {
    getActionDefinitionsBuilder({G_SDIV, G_UDIV, G_UREM})
        .legalFor({sXLen})
        .customFor({s32})
        .libcallFor({sDoubleXLen})
        .clampScalar(0, s32, sDoubleXLen)
        .widenScalarToNextPow2(0);
    getActionDefinitionsBuilder(G_SREM)
        .legalFor({sXLen})
        .libcallFor({sDoubleXLen})
        .clampScalar(0, sXLen, sDoubleXLen)
        .widenScalarToNextPow2(0);
  } else {
    getActionDefinitionsBuilder({G_UDIV, G_SDIV, G_UREM, G_SREM})
        .libcallFor({sXLen, sDoubleXLen})
        .clampScalar(0, sXLen, sDoubleXLen)
        .widenScalarToNextPow2(0);
  }

  getActionDefinitionsBuilder({G_SDIVREM, G_UDIVREM}).lower();

  getActionDefinitionsBuilder(G_ABS)
      .customFor(ST.hasStdExtZbb(), {sXLen})
      .minScalar(ST.hasStdExtZbb(), 0, sXLen)
      .lower();

  getActionDefinitionsBuilder({G_ABDS, G_ABDU})
      .minScalar(ST.hasStdExtZbb(), 0, sXLen)
      .lower();

  getActionDefinitionsBuilder({G_UMAX, G_UMIN, G_SMAX, G_SMIN})
      .legalFor(ST.hasStdExtZbb(), {sXLen})
      .minScalar(ST.hasStdExtZbb(), 0, sXLen)
      .lower();

  getActionDefinitionsBuilder({G_SCMP, G_UCMP}).lower();

  getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});

  getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE, G_MEMSET}).libcall();

  getActionDefinitionsBuilder({G_DYN_STACKALLOC, G_STACKSAVE, G_STACKRESTORE})
      .lower();

  // FP Operations

  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMA, G_FSQRT,
                               G_FMAXNUM, G_FMINNUM, G_FMAXIMUMNUM,
                               G_FMINIMUMNUM})
      .legalFor(ST.hasStdExtF(), {s32})
      .legalFor(ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfh(), {s16})
      .libcallFor({s32, s64})
      .libcallFor(ST.is64Bit(), {s128});

  getActionDefinitionsBuilder({G_FNEG, G_FABS})
      .legalFor(ST.hasStdExtF(), {s32})
      .legalFor(ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfh(), {s16})
      .lowerFor({s32, s64, s128});

  getActionDefinitionsBuilder(G_FREM)
      .libcallFor({s32, s64})
      .libcallFor(ST.is64Bit(), {s128})
      .minScalar(0, s32)
      .scalarize(0);

  getActionDefinitionsBuilder(G_FCOPYSIGN)
      .legalFor(ST.hasStdExtF(), {{s32, s32}})
      .legalFor(ST.hasStdExtD(), {{s64, s64}, {s32, s64}, {s64, s32}})
      .legalFor(ST.hasStdExtZfh(), {{s16, s16}, {s16, s32}, {s32, s16}})
      .legalFor(ST.hasStdExtZfh() && ST.hasStdExtD(), {{s16, s64}, {s64, s16}})
      .lower();

  getActionDefinitionsBuilder(G_FPTRUNC)
      .legalFor(ST.hasStdExtD(), {{s32, s64}})
      .legalFor(ST.hasStdExtZfh(), {{s16, s32}})
      .legalFor(ST.hasStdExtZfh() && ST.hasStdExtD(), {{s16, s64}})
      .libcallFor({{s32, s64}})
      .libcallFor(ST.is64Bit(), {{s32, s128}, {s64, s128}});
  getActionDefinitionsBuilder(G_FPEXT)
      .legalFor(ST.hasStdExtD(), {{s64, s32}})
      .legalFor(ST.hasStdExtZfh(), {{s32, s16}})
      .legalFor(ST.hasStdExtZfh() && ST.hasStdExtD(), {{s64, s16}})
      .libcallFor({{s64, s32}})
      .libcallFor(ST.is64Bit(), {{s128, s32}, {s128, s64}});

  getActionDefinitionsBuilder(G_FCMP)
      .legalFor(ST.hasStdExtF(), {{sXLen, s32}})
      .legalFor(ST.hasStdExtD(), {{sXLen, s64}})
      .legalFor(ST.hasStdExtZfh(), {{sXLen, s16}})
      .clampScalar(0, sXLen, sXLen)
      .libcallFor({{sXLen, s32}, {sXLen, s64}})
      .libcallFor(ST.is64Bit(), {{sXLen, s128}});

  getActionDefinitionsBuilder(G_IS_FPCLASS)
      .customFor(ST.hasStdExtF(), {{s1, s32}})
      .customFor(ST.hasStdExtD(), {{s1, s64}})
      .customFor(ST.hasStdExtZfh(), {{s1, s16}})
      .lowerFor({{s1, s32}, {s1, s64}});

  getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor(ST.hasStdExtF(), {s32})
      .legalFor(ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfh(), {s16})
      .customFor(!ST.is64Bit(), {s32})
      .customFor(ST.is64Bit(), {s32, s64})
      .lowerFor({s64, s128});

  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
      .legalFor(ST.hasStdExtF(), {{sXLen, s32}})
      .legalFor(ST.hasStdExtD(), {{sXLen, s64}})
      .legalFor(ST.hasStdExtZfh(), {{sXLen, s16}})
      .customFor(ST.is64Bit() && ST.hasStdExtF(), {{s32, s32}})
      .customFor(ST.is64Bit() && ST.hasStdExtD(), {{s32, s64}})
      .customFor(ST.is64Bit() && ST.hasStdExtZfh(), {{s32, s16}})
      .widenScalarToNextPow2(0)
      .minScalar(0, s32)
      .libcallFor({{s32, s32}, {s64, s32}, {s32, s64}, {s64, s64}})
      .libcallFor(ST.is64Bit(), {{s32, s128}, {s64, s128}})
      .libcallFor(ST.is64Bit(), {{s128, s32}, {s128, s64}, {s128, s128}});

  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
      .legalFor(ST.hasStdExtF(), {{s32, sXLen}})
      .legalFor(ST.hasStdExtD(), {{s64, sXLen}})
      .legalFor(ST.hasStdExtZfh(), {{s16, sXLen}})
      .widenScalarToNextPow2(1)
      // Promote the integer operand to XLen when the FP result type is
      // natively supported, so the legal form above applies.
      .widenScalarIf(
          [=, &ST](const LegalityQuery &Query) {
            return Query.Types[0].isScalar() && Query.Types[1].isScalar() &&
                   (Query.Types[1].getSizeInBits() < ST.getXLen()) &&
                   ((ST.hasStdExtF() && Query.Types[0].getSizeInBits() == 32) ||
                    (ST.hasStdExtD() && Query.Types[0].getSizeInBits() == 64) ||
                    (ST.hasStdExtZfh() &&
                     Query.Types[0].getSizeInBits() == 16));
          },
          changeTo(1, sXLen))
      .minScalar(1, s32)
      .libcallFor({{s32, s32}, {s64, s32}, {s32, s64}, {s64, s64}})
      .libcallFor(ST.is64Bit(), {{s128, s32}, {s128, s64}})
      .libcallFor(ST.is64Bit(), {{s32, s128}, {s64, s128}, {s128, s128}});

  getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR, G_FRINT, G_FNEARBYINT,
                               G_INTRINSIC_TRUNC, G_INTRINSIC_ROUND,
                               G_INTRINSIC_ROUNDEVEN})
      .legalFor(ST.hasStdExtZfa(), {s32})
      .legalFor(ST.hasStdExtZfa() && ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfa() && ST.hasStdExtZfh(), {s16})
      .libcallFor({s32, s64})
      .libcallFor(ST.is64Bit(), {s128});

  getActionDefinitionsBuilder({G_FMAXIMUM, G_FMINIMUM})
      .legalFor(ST.hasStdExtZfa(), {s32})
      .legalFor(ST.hasStdExtZfa() && ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfa() && ST.hasStdExtZfh(), {s16});

  getActionDefinitionsBuilder({G_FCOS, G_FSIN, G_FTAN, G_FPOW, G_FLOG, G_FLOG2,
                               G_FLOG10, G_FEXP, G_FEXP2, G_FEXP10, G_FACOS,
                               G_FASIN, G_FATAN, G_FATAN2, G_FCOSH, G_FSINH,
                               G_FTANH, G_FMODF})
      .libcallFor({s32, s64})
      .libcallFor(ST.is64Bit(), {s128});
  getActionDefinitionsBuilder({G_FPOWI, G_FLDEXP})
      .libcallFor({{s32, s32}, {s64, s32}})
      .libcallFor(ST.is64Bit(), {{s128, s32}});

641
642 getActionDefinitionsBuilder(G_FCANONICALIZE)
643 .legalFor(ST.hasStdExtF(), {s32})
644 .legalFor(ST.hasStdExtD(), {s64})
645 .legalFor(ST.hasStdExtZfh(), {s16});
646
647 getActionDefinitionsBuilder(G_VASTART).customFor({p0});
648
649
650
651 getActionDefinitionsBuilder(G_VAARG)
652
653
654 .clampScalar(0, sXLen, sXLen)
655 .lowerForCartesianProduct({sXLen, p0}, {p0});
656
657 getActionDefinitionsBuilder(G_VSCALE)
658 .clampScalar(0, sXLen, sXLen)
659 .customFor({sXLen});
660
  auto &SplatActions =
      getActionDefinitionsBuilder(G_SPLAT_VECTOR)
          .legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
                       typeIs(1, sXLen)))
          .customIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST), typeIs(1, s1)));

  // Handle s64 element vectors on RV32. With both vector f64 and scalar D
  // support these splats can be selected directly, so mark them legal; with
  // only vector i64 support, custom-lower them by splitting the scalar into
  // two 32-bit halves (G_SPLAT_VECTOR_SPLIT_I64_VL).
  if (XLen == 32) {
    if (ST.hasVInstructionsF64() && ST.hasStdExtD())
      SplatActions.legalIf(all(
          typeInSet(0, {nxv1s64, nxv2s64, nxv4s64, nxv8s64}), typeIs(1, s64)));
    else if (ST.hasVInstructionsI64())
      SplatActions.customIf(all(
          typeInSet(0, {nxv1s64, nxv2s64, nxv4s64, nxv8s64}), typeIs(1, s64)));
  }

  SplatActions.clampScalar(1, sXLen, sXLen);

  LegalityPredicate ExtractSubvecBitcastPred =
      [=](const LegalityQuery &Query) {
        LLT DstTy = Query.Types[0];
        LLT SrcTy = Query.Types[1];
        return DstTy.getElementType() == LLT::scalar(1) &&
               DstTy.getElementCount().getKnownMinValue() >= 8 &&
               SrcTy.getElementCount().getKnownMinValue() >= 8;
      };
  getActionDefinitionsBuilder(G_EXTRACT_SUBVECTOR)
      // We can't slide mask vectors indexed by their i1 elements; the
      // smallest granule we can slide is i8. When both element counts are
      // multiples of 8 we can instead bitcast to equivalent i8 vectors.
      .bitcastIf(
          all(typeIsLegalBoolVec(0, BoolVecTys, ST),
              typeIsLegalBoolVec(1, BoolVecTys, ST), ExtractSubvecBitcastPred),
          [=](const LegalityQuery &Query) {
            LLT CastTy = LLT::vector(
                Query.Types[0].getElementCount().divideCoefficientBy(8), 8);
            return std::pair(0, CastTy);
          })
      .customIf(LegalityPredicates::any(
          typeIsLegalBoolVec(0, BoolVecTys, ST),
          typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST)));

  getActionDefinitionsBuilder(G_INSERT_SUBVECTOR)
      .customIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST),
                    typeIsLegalBoolVec(1, BoolVecTys, ST)))
      .customIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
                    typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)));

  getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .lower();

  getActionDefinitionsBuilder({G_ATOMIC_CMPXCHG, G_ATOMICRMW_ADD})
      .legalFor(ST.hasStdExtA(), {{sXLen, p0}})
      .libcallFor(!ST.hasStdExtA(), {{s8, p0}, {s16, p0}, {s32, p0}, {s64, p0}})
      .clampScalar(0, sXLen, sXLen);

  getActionDefinitionsBuilder(G_ATOMICRMW_SUB)
      .libcallFor(!ST.hasStdExtA(), {{s8, p0}, {s16, p0}, {s32, p0}, {s64, p0}})
      .clampScalar(0, sXLen, sXLen)
      .lower();

  LegalityPredicate InsertVectorEltPred = [=](const LegalityQuery &Query) {
    LLT VecTy = Query.Types[0];
    LLT EltTy = Query.Types[1];
    return VecTy.getElementType() == EltTy;
  };

  getActionDefinitionsBuilder(G_INSERT_VECTOR_ELT)
      .legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
                   InsertVectorEltPred, typeIs(2, sXLen)))
      .legalIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST), InsertVectorEltPred,
                   typeIs(2, sXLen)));

  getLegacyLegalizerInfo().computeTables();
}

bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                           MachineInstr &MI) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();

  if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
          RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID)) {
    if (II->hasScalarOperand() && !II->IsFPIntrinsic) {
      auto OldScalar = MI.getOperand(II->ScalarOperand + 2).getReg();

      if (MRI.getType(OldScalar).isScalar()) {
        if (MRI.getType(OldScalar).getSizeInBits() < sXLen.getSizeInBits()) {
          // Any-extend narrow scalar operands to XLen.
          Helper.widenScalarSrc(MI, sXLen, II->ScalarOperand + 2,
                                TargetOpcode::G_ANYEXT);
        } else if (MRI.getType(OldScalar).getSizeInBits() >
                   sXLen.getSizeInBits()) {
          // Scalar operands wider than XLen are not supported here.
          return false;
        }
      }
    }
    return true;
  }

  switch (IntrinsicID) {
  default:
    return false;
  case Intrinsic::vacopy: {
    // vacopy arguments must be legal because of the intrinsic signature.
    // No need to check here.

    MachineFunction &MF = *MI.getMF();
    const DataLayout &DL = MIRBuilder.getDataLayout();
    LLVMContext &Ctx = MF.getFunction().getContext();

    Register DstLst = MI.getOperand(1).getReg();
    LLT PtrTy = MRI.getType(DstLst);

    // Load the source va_list.
    Align Alignment = DL.getABITypeAlign(getTypeForLLT(PtrTy, Ctx));
    MachineMemOperand *LoadMMO = MF.getMachineMemOperand(
        MachinePointerInfo(), MachineMemOperand::MOLoad, PtrTy, Alignment);
    auto Tmp = MIRBuilder.buildLoad(PtrTy, MI.getOperand(2), *LoadMMO);

    // Store the result in the destination va_list.
    MachineMemOperand *StoreMMO = MF.getMachineMemOperand(
        MachinePointerInfo(), MachineMemOperand::MOStore, PtrTy, Alignment);
    MIRBuilder.buildStore(Tmp, DstLst, *StoreMMO);

    MI.eraseFromParent();
    return true;
  }
  case Intrinsic::riscv_masked_atomicrmw_add:
  case Intrinsic::riscv_masked_atomicrmw_sub:
  case Intrinsic::riscv_masked_cmpxchg:
    return true;
  }
}

bool RISCVLegalizerInfo::legalizeVAStart(MachineInstr &MI,
                                         MachineIRBuilder &MIRBuilder) const {
  // Store the address of the VarArgsFrameIndex slot into the va_list.
  assert(MI.getOpcode() == TargetOpcode::G_VASTART);
  MachineFunction *MF = MI.getParent()->getParent();
  RISCVMachineFunctionInfo *FuncInfo = MF->getInfo<RISCVMachineFunctionInfo>();
  int FI = FuncInfo->getVarArgsFrameIndex();
  LLT AddrTy = MIRBuilder.getMRI()->getType(MI.getOperand(0).getReg());
  auto FINAddr = MIRBuilder.buildFrameIndex(AddrTy, FI);
  assert(MI.hasOneMemOperand());
  MIRBuilder.buildStore(FINAddr, MI.getOperand(0).getReg(),
                        *MI.memoperands()[0]);
  MI.eraseFromParent();
  return true;
}

bool RISCVLegalizerInfo::legalizeBRJT(MachineInstr &MI,
                                      MachineIRBuilder &MIRBuilder) const {
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  auto &MF = *MI.getParent()->getParent();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  unsigned EntrySize = MJTI->getEntrySize(MF.getDataLayout());

  Register PtrReg = MI.getOperand(0).getReg();
  LLT PtrTy = MRI.getType(PtrReg);
  Register IndexReg = MI.getOperand(2).getReg();
  LLT IndexTy = MRI.getType(IndexReg);

  if (!isPowerOf2_32(EntrySize))
    return false;

  auto ShiftAmt = MIRBuilder.buildConstant(IndexTy, Log2_32(EntrySize));
  IndexReg = MIRBuilder.buildShl(IndexTy, IndexReg, ShiftAmt).getReg(0);

  auto Addr = MIRBuilder.buildPtrAdd(PtrTy, PtrReg, IndexReg);

  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getJumpTable(MF), MachineMemOperand::MOLoad,
      EntrySize, Align(MJTI->getEntryAlignment(MF.getDataLayout())));

  Register TargetReg;
  switch (MJTI->getEntryKind()) {
  default:
    return false;
  case MachineJumpTableInfo::EK_LabelDifference32: {
    // For PIC, the sequence is:
    // BRIND(load(Jumptable + index) + RelocBase)
    // RelocBase can be JumpTable, GOT or some sort of global base.
    unsigned LoadOpc =
        STI.is64Bit() ? TargetOpcode::G_SEXTLOAD : TargetOpcode::G_LOAD;
    auto Load = MIRBuilder.buildLoadInstr(LoadOpc, IndexTy, Addr, *MMO);
    TargetReg = MIRBuilder.buildPtrAdd(PtrTy, PtrReg, Load).getReg(0);
    break;
  }
  case MachineJumpTableInfo::EK_Custom32: {
    auto Load = MIRBuilder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, IndexTy,
                                          Addr, *MMO);
    TargetReg = MIRBuilder.buildIntToPtr(PtrTy, Load).getReg(0);
    break;
  }
  case MachineJumpTableInfo::EK_BlockAddress:
    TargetReg = MIRBuilder.buildLoad(PtrTy, Addr, *MMO).getReg(0);
    break;
  }

  MIRBuilder.buildBrIndirect(TargetReg);

  MI.eraseFromParent();
  return true;
}
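// The address computation above is entry = base + (index << Log2(EntrySize)),
// which is why a power-of-2 entry size is required. For EK_LabelDifference32
// the loaded 32-bit value is an offset from the table base, hence the extra
// ptr_add (and the sign-extending load on RV64).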

bool RISCVLegalizerInfo::shouldBeInConstantPool(const APInt &APImm,
                                                bool ShouldOptForSize) const {
  assert(APImm.getBitWidth() == 32 || APImm.getBitWidth() == 64);
  int64_t Imm = APImm.getSExtValue();

  // All simm32 constants should be handled by isel.
  // NOTE: The getMaxBuildIntsCost call below should return a value >= 2,
  // making this check redundant, but small immediates are common so it
  // improves compile time.
  if (isInt<32>(Imm))
    return false;

  // We only need to cost the immediate if constant pool lowering is enabled.
  if (!STI.useConstantPoolForLargeInts())
    return false;

  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, STI);
  if (Seq.size() <= STI.getMaxBuildIntsCost())
    return false;

  // The optimization below is disabled for opt size. If we're optimizing for
  // size, use a constant pool.
  if (ShouldOptForSize)
    return true;

  // Special case: see if the constant can be built as
  // (ADD (SLLI X, ShiftAmt), X). If so, prefer that two-register sequence
  // over a constant-pool load, at the cost of an extra temporary register.
  unsigned ShiftAmt, AddOpc;
  RISCVMatInt::InstSeq SeqLo =
      RISCVMatInt::generateTwoRegInstSeq(Imm, STI, ShiftAmt, AddOpc);
  return !(!SeqLo.empty() && (SeqLo.size() + 2) <= STI.getMaxBuildIntsCost());
}

bool RISCVLegalizerInfo::legalizeVScale(MachineInstr &MI,
                                        MachineIRBuilder &MIB) const {
  const LLT XLenTy(STI.getXLenVT());
  Register Dst = MI.getOperand(0).getReg();

  // We define our scalable vector types for LMUL=1 to use a 64-bit known
  // minimum size, e.g. <vscale x 2 x i32>. VLENB is in bytes, so vscale is
  // VLENB / 8.
  static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected VLEN multiple");
  if (STI.getRealMinVLen() < RISCV::RVVBitsPerBlock)
    // Support for VLEN==32 is incomplete.
    return false;

  // Choose the best shift/multiply sequence manually; later simplification
  // isn't always able to find it.
  uint64_t Val = MI.getOperand(1).getCImm()->getZExtValue();
  if (isPowerOf2_64(Val)) {
    uint64_t Log2 = Log2_64(Val);
    if (Log2 < 3) {
      auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
      MIB.buildLShr(Dst, VLENB, MIB.buildConstant(XLenTy, 3 - Log2));
    } else if (Log2 > 3) {
      auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
      MIB.buildShl(Dst, VLENB, MIB.buildConstant(XLenTy, Log2 - 3));
    } else {
      MIB.buildInstr(RISCV::G_READ_VLENB, {Dst}, {});
    }
  } else if ((Val % 8) == 0) {
    // If the multiplier is a multiple of 8, scale it down to avoid needing
    // to shift the VLENB value.
    auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
    MIB.buildMul(Dst, VLENB, MIB.buildConstant(XLenTy, Val / 8));
  } else {
    auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
    auto VScale = MIB.buildLShr(XLenTy, VLENB, MIB.buildConstant(XLenTy, 3));
    MIB.buildMul(Dst, VScale, MIB.buildConstant(XLenTy, Val));
  }
  MI.eraseFromParent();
  return true;
}
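// Worked example: vscale equals VLENB / 8 (RVVBitsPerBlock == 64), so
// G_VSCALE 4 becomes VLENB >> (3 - Log2(4)) == VLENB >> 1. With VLEN = 256,
// VLENB = 32 and the result is 16, matching 4 * vscale = 4 * 4.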

// Custom-lower extensions from mask vectors by using a vselect either with 1
// for zero/any-extension or -1 for sign-extension:
//   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
// Note that any-extension is lowered identically to zero-extension.
bool RISCVLegalizerInfo::legalizeExt(MachineInstr &MI,
                                     MachineIRBuilder &MIB) const {

  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_ZEXT || Opc == TargetOpcode::G_SEXT ||
         Opc == TargetOpcode::G_ANYEXT);

  MachineRegisterInfo &MRI = *MIB.getMRI();
  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();

  LLT DstTy = MRI.getType(Dst);
  int64_t ExtTrueVal = Opc == TargetOpcode::G_SEXT ? -1 : 1;
  LLT DstEltTy = DstTy.getElementType();
  auto SplatZero = MIB.buildSplatVector(DstTy, MIB.buildConstant(DstEltTy, 0));
  auto SplatTrue =
      MIB.buildSplatVector(DstTy, MIB.buildConstant(DstEltTy, ExtTrueVal));
  MIB.buildSelect(Dst, Src, SplatTrue, SplatZero);

  MI.eraseFromParent();
  return true;
}

bool RISCVLegalizerInfo::legalizeLoadStore(MachineInstr &MI,
                                           LegalizerHelper &Helper,
                                           MachineIRBuilder &MIB) const {
  assert((isa<GLoad>(MI) || isa<GStore>(MI)) &&
         "Machine instructions must be Load/Store.");
  MachineRegisterInfo &MRI = *MIB.getMRI();
  MachineFunction *MF = MI.getMF();
  const DataLayout &DL = MIB.getDataLayout();
  LLVMContext &Ctx = MF->getFunction().getContext();

  Register DstReg = MI.getOperand(0).getReg();
  LLT DataTy = MRI.getType(DstReg);
  if (!DataTy.isVector())
    return false;

  if (!MI.hasOneMemOperand())
    return false;

  MachineMemOperand *MMO = *MI.memoperands_begin();

  const auto *TLI = STI.getTargetLowering();
  EVT VT = EVT::getEVT(getTypeForLLT(DataTy, Ctx));

  if (TLI->allowsMemoryAccessForAlignment(Ctx, DL, VT, *MMO))
    return true;

  unsigned EltSizeBits = DataTy.getScalarSizeInBits();
  assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
         "Unexpected unaligned RVV load type");

  // Calculate the equivalent vector type with i8 elements.
  unsigned NumElements =
      DataTy.getElementCount().getKnownMinValue() * (EltSizeBits / 8);
  LLT NewDataTy = LLT::scalable_vector(NumElements, 8);

  Helper.bitcast(MI, 0, NewDataTy);

  return true;
}
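// Example of the bitcast above: an underaligned load of nxv4s16 is rewritten
// as a load of nxv8s8 (the same number of bits per vscale unit), which only
// requires byte alignment and can therefore use the 8-bit element load.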

/// Return the type of the mask suitable for masking the provided vector type.
/// This is simply an i1 element type vector of the same (possibly scalable)
/// length.
static LLT getMaskTypeFor(LLT VecTy) {
  assert(VecTy.isVector());
  ElementCount EC = VecTy.getElementCount();
  return LLT::vector(EC, LLT::scalar(1));
}

/// Creates an all ones mask suitable for masking a vector of type VecTy with
/// vector length VL.
static MachineInstrBuilder buildAllOnesMask(LLT VecTy, const SrcOp &VL,
                                            MachineIRBuilder &MIB,
                                            MachineRegisterInfo &MRI) {
  LLT MaskTy = getMaskTypeFor(VecTy);
  return MIB.buildInstr(RISCV::G_VMSET_VL, {MaskTy}, {VL});
}

/// Gets the two common "VL" operands: an all-ones mask and the vector length.
/// The mask type is the mask type of the given vector type.
static std::pair<MachineInstrBuilder, MachineInstrBuilder>
buildDefaultVLOps(LLT VecTy, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) {
  assert(VecTy.isScalableVector() && "Expecting scalable container type");
  const LLT XLenTy(MIB.getMF().getSubtarget<RISCVSubtarget>().getXLenVT());
  auto VL = MIB.buildConstant(XLenTy, -1); // -1 encodes VLMAX.
  auto Mask = buildAllOnesMask(VecTy, VL, MIB, MRI);
  return {Mask, VL};
}

// Splat a 64-bit value on RV32 by handing the two 32-bit halves to a
// target-specific node that performs the split splat.
static MachineInstrBuilder
buildSplatPartsS64WithVL(const DstOp &Dst, const SrcOp &Passthru, Register Lo,
                         Register Hi, const SrcOp &VL, MachineIRBuilder &MIB,
                         MachineRegisterInfo &MRI) {
  return MIB.buildInstr(RISCV::G_SPLAT_VECTOR_SPLIT_I64_VL, {Dst},
                        {Passthru, Lo, Hi, VL});
}

static MachineInstrBuilder
buildSplatSplitS64WithVL(const DstOp &Dst, const SrcOp &Passthru,
                         const SrcOp &Scalar, const SrcOp &VL,
                         MachineIRBuilder &MIB, MachineRegisterInfo &MRI) {
  assert(Scalar.getLLTTy(MRI) == LLT::scalar(64) && "Unexpected VecTy!");
  auto Unmerge = MIB.buildUnmerge(LLT::scalar(32), Scalar);
  return buildSplatPartsS64WithVL(Dst, Passthru, Unmerge.getReg(0),
                                  Unmerge.getReg(1), VL, MIB, MRI);
}

// Lower splats of s1 types to G_ICMP. For each mask vector type, we have a
// legal equivalently-sized i8 type, so we can use that as a go-between.
// Splats of s1 types that have constant value can be legalized as VMSET_VL or
// VMCLR_VL.
bool RISCVLegalizerInfo::legalizeSplatVector(MachineInstr &MI,
                                             MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_SPLAT_VECTOR);

  MachineRegisterInfo &MRI = *MIB.getMRI();

  Register Dst = MI.getOperand(0).getReg();
  Register SplatVal = MI.getOperand(1).getReg();

  LLT VecTy = MRI.getType(Dst);
  LLT XLenTy(STI.getXLenVT());

  // Handle the case of s64 element vectors on RV32.
  if (XLenTy.getSizeInBits() == 32 &&
      VecTy.getElementType().getSizeInBits() == 64) {
    auto VL = buildDefaultVLOps(VecTy, MIB, MRI).second;
    buildSplatSplitS64WithVL(Dst, MIB.buildUndef(VecTy), SplatVal, VL, MIB,
                             MRI);
    MI.eraseFromParent();
    return true;
  }

  // All-ones and all-zeros mask splats have dedicated instructions.
  MachineInstr &SplatValMI = *MRI.getVRegDef(SplatVal);
  if (isAllOnesOrAllOnesSplat(SplatValMI, MRI)) {
    auto VL = buildDefaultVLOps(VecTy, MIB, MRI).second;
    MIB.buildInstr(RISCV::G_VMSET_VL, {Dst}, {VL});
    MI.eraseFromParent();
    return true;
  }
  if (isNullOrNullSplat(SplatValMI, MRI)) {
    auto VL = buildDefaultVLOps(VecTy, MIB, MRI).second;
    MIB.buildInstr(RISCV::G_VMCLR_VL, {Dst}, {VL});
    MI.eraseFromParent();
    return true;
  }

  // Handle a non-constant mask splat (i.e. not sure if it's all zeros or all
  // ones) by promoting it to an s8 splat and comparing against zero.
  LLT InterEltTy = LLT::scalar(8);
  LLT InterTy = VecTy.changeElementType(InterEltTy);
  auto ZExtSplatVal = MIB.buildZExt(InterEltTy, SplatVal);
  auto And =
      MIB.buildAnd(InterEltTy, ZExtSplatVal, MIB.buildConstant(InterEltTy, 1));
  auto LHS = MIB.buildSplatVector(InterTy, And);
  auto ZeroSplat =
      MIB.buildSplatVector(InterTy, MIB.buildConstant(InterEltTy, 0));
  MIB.buildICmp(CmpInst::Predicate::ICMP_NE, Dst, LHS, ZeroSplat);
  MI.eraseFromParent();
  return true;
}
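// For the non-constant mask splat above: (SplatVal & 1) is splatted as an s8
// vector and compared not-equal against an all-zeros s8 splat, which yields
// exactly the s1 mask vector that G_SPLAT_VECTOR of the s1 value denotes.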

static LLT getLMUL1Ty(LLT VecTy) {
  assert(VecTy.getElementType().getSizeInBits() <= 64 &&
         "Unexpected vector LLT");
  return LLT::scalable_vector(RISCV::RVVBitsPerBlock /
                                  VecTy.getElementType().getSizeInBits(),
                              VecTy.getElementType());
}

bool RISCVLegalizerInfo::legalizeExtractSubvector(MachineInstr &MI,
                                                  MachineIRBuilder &MIB) const {
  GExtractSubvector &ES = cast<GExtractSubvector>(MI);

  MachineRegisterInfo &MRI = *MIB.getMRI();

  Register Dst = ES.getReg(0);
  Register Src = ES.getSrcVec();
  uint64_t Idx = ES.getIndexImm();

  // With an index of 0 this is a cast-like subvector, which can be performed
  // with subregister operations.
  if (Idx == 0)
    return true;

  LLT LitTy = MRI.getType(Dst);
  LLT BigTy = MRI.getType(Src);

  if (LitTy.getElementType() == LLT::scalar(1)) {
    // We can't slide this mask vector down indexed by its i1 elements.
    // Take the slow path: zero-extend to i8 vectors, perform the extract
    // there, and compare back down to a mask.
    LLT ExtBigTy = BigTy.changeElementType(LLT::scalar(8));
    LLT ExtLitTy = LitTy.changeElementType(LLT::scalar(8));
    auto BigZExt = MIB.buildZExt(ExtBigTy, Src);
    auto ExtractZExt = MIB.buildExtractSubvector(ExtLitTy, BigZExt, Idx);
    auto SplatZero = MIB.buildSplatVector(
        ExtLitTy, MIB.buildConstant(ExtLitTy.getElementType(), 0));
    MIB.buildICmp(CmpInst::Predicate::ICMP_NE, Dst, ExtractZExt, SplatZero);
    MI.eraseFromParent();
    return true;
  }

  // extract_subvector scales the index by vscale if the subvector is
  // scalable, and decomposeSubvectorInsertExtractToSubRegs takes this into
  // account.
  const RISCVRegisterInfo *TRI = STI.getRegisterInfo();
  MVT LitTyMVT = getMVTForLLT(LitTy);
  auto Decompose =
      RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
          getMVTForLLT(BigTy), LitTyMVT, Idx, TRI);
  unsigned RemIdx = Decompose.second;

  // If the index has been completely eliminated then this is a subvector
  // extract that naturally aligns to a vector register, handled with
  // subregister manipulation.
  if (RemIdx == 0)
    return true;

  // Otherwise LitTy is M1 or smaller and must be slid down: if it were
  // larger, the index would have to be register-aligned and RemIdx would be
  // zero.
  assert(RISCVVType::decodeVLMUL(RISCVTargetLowering::getLMUL(LitTyMVT))
             .second ||
         RISCVTargetLowering::getLMUL(LitTyMVT) == RISCVVType::LMUL_1);

  // If the vector type is an LMUL-group type, extract a subvector equal to
  // the nearest full vector register type first.
  LLT InterLitTy = BigTy;
  Register Vec = Src;
  if (TypeSize::isKnownGT(BigTy.getSizeInBits(),
                          getLMUL1Ty(BigTy).getSizeInBits())) {
    // If BigTy has an LMUL > 1, then LitTy should have a smaller LMUL, and
    // the decomposition must have produced a subregister.
    assert(Decompose.first != RISCV::NoSubRegister);
    InterLitTy = getLMUL1Ty(BigTy);
    // A G_EXTRACT_SUBVECTOR at the aligned index resolves to a subregister
    // copy (the GlobalISel equivalent of SDAG's TargetExtractSubreg).
    Vec = MIB.buildExtractSubvector(InterLitTy, Src, Idx - RemIdx).getReg(0);
  }

  // Slide this vector register down by the desired number of elements in
  // order to place the subvector we want at element 0.
  const LLT XLenTy(STI.getXLenVT());
  auto SlidedownAmt = MIB.buildVScale(XLenTy, RemIdx);
  auto [Mask, VL] = buildDefaultVLOps(LitTy, MIB, MRI);
  uint64_t Policy = RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC;
  auto Slidedown = MIB.buildInstr(
      RISCV::G_VSLIDEDOWN_VL, {InterLitTy},
      {MIB.buildUndef(InterLitTy), Vec, SlidedownAmt, Mask, VL, Policy});

  // The desired subvector now starts at element 0; extract it as the result.
  // This should resolve to a COPY.
  MIB.buildExtractSubvector(Dst, Slidedown, 0);

  MI.eraseFromParent();
  return true;
}

bool RISCVLegalizerInfo::legalizeInsertSubvector(MachineInstr &MI,
                                                 LegalizerHelper &Helper,
                                                 MachineIRBuilder &MIB) const {
  GInsertSubvector &IS = cast<GInsertSubvector>(MI);

  MachineRegisterInfo &MRI = *MIB.getMRI();

  Register Dst = IS.getReg(0);
  Register BigVec = IS.getBigVec();
  Register LitVec = IS.getSubVec();
  uint64_t Idx = IS.getIndexImm();

  LLT BigTy = MRI.getType(BigVec);
  LLT LitTy = MRI.getType(LitVec);

  if (Idx == 0 &&
      MRI.getVRegDef(BigVec)->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    return true;

  // We don't have the ability to slide mask vectors up indexed by their i1
  // elements; the smallest granule is i8. Often we can bitcast to equivalent
  // i8 vectors; otherwise zero-extend to an i8 vector and let the widened
  // insert be legalized recursively.
  if (LitTy.getElementType() == LLT::scalar(1)) {
    auto BigTyMinElts = BigTy.getElementCount().getKnownMinValue();
    auto LitTyMinElts = LitTy.getElementCount().getKnownMinValue();
    if (BigTyMinElts >= 8 && LitTyMinElts >= 8)
      return Helper.bitcast(
          IS, 0,
          LLT::vector(BigTy.getElementCount().divideCoefficientBy(8), 8));

    LLT ExtBigTy = BigTy.changeElementType(LLT::scalar(8));
    return Helper.widenScalar(IS, 0, ExtBigTy);
  }

  const RISCVRegisterInfo *TRI = STI.getRegisterInfo();
  unsigned SubRegIdx, RemIdx;
  std::tie(SubRegIdx, RemIdx) =
      RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
          getMVTForLLT(BigTy), getMVTForLLT(LitTy), Idx, TRI);

  TypeSize VecRegSize = TypeSize::getScalable(RISCV::RVVBitsPerBlock);
  assert(isPowerOf2_64(
      STI.expandVScale(LitTy.getSizeInBits()).getKnownMinValue()));
  bool ExactlyVecRegSized =
      STI.expandVScale(LitTy.getSizeInBits())
          .isKnownMultipleOf(STI.expandVScale(VecRegSize));

  // If the subvector is register-aligned and a whole number of registers,
  // the insert is just subregister manipulation.
  if (RemIdx == 0 && ExactlyVecRegSized)
    return true;

  // Otherwise the subvector must be slid into place. A slideup does not
  // preserve the elements below the insertion point, so to place LitVec at a
  // non-register-aligned index we:
  // 1. extract the nearest register-aligned subvector containing the
  //    insertion point (AlignedExtract),
  // 2. insert LitVec at index 0 of an undef register group and slide it up
  //    by RemIdx elements with a vslideup whose VL covers exactly the
  //    inserted elements, and
  // 3. insert the result back at the aligned index.
  const LLT XLenTy(STI.getXLenVT());
  LLT InterLitTy = BigTy;
  Register AlignedExtract = BigVec;
  unsigned AlignedIdx = Idx - RemIdx;
  if (TypeSize::isKnownGT(BigTy.getSizeInBits(),
                          getLMUL1Ty(BigTy).getSizeInBits())) {
    InterLitTy = getLMUL1Ty(BigTy);
    // Extract a subvector equal to the nearest full vector register type.
    AlignedExtract =
        MIB.buildExtractSubvector(InterLitTy, BigVec, AlignedIdx).getReg(0);
  }

  auto Insert = MIB.buildInsertSubvector(InterLitTy,
                                         MIB.buildUndef(InterLitTy),
                                         LitVec, 0);

  auto Mask = buildDefaultVLOps(BigTy, MIB, MRI).first;
  auto VL = MIB.buildVScale(XLenTy, LitTy.getElementCount().getKnownMinValue());

  // If we're inserting into the lowest elements, use a tail undisturbed
  // vmv.v.v instead of a vslideup.
  MachineInstrBuilder Inserted;
  bool NeedInsertSubvec =
      TypeSize::isKnownGT(BigTy.getSizeInBits(), InterLitTy.getSizeInBits());
  Register InsertedDst =
      NeedInsertSubvec ? MRI.createGenericVirtualRegister(InterLitTy) : Dst;
  if (RemIdx == 0) {
    Inserted = MIB.buildInstr(RISCV::G_VMV_V_V_VL, {InsertedDst},
                              {AlignedExtract, Insert, VL});
  } else {
    auto SlideupAmt = MIB.buildVScale(XLenTy, RemIdx);
    // Construct the vector length corresponding to RemIdx + length(LitTy).
    VL = MIB.buildAdd(XLenTy, SlideupAmt, VL);
    // Use a tail agnostic policy if we're inserting over InterLitTy's tail.
    ElementCount EndIndex =
        ElementCount::getScalable(RemIdx) + LitTy.getElementCount();
    uint64_t Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
    if (STI.expandVScale(EndIndex) ==
        STI.expandVScale(InterLitTy.getElementCount()))
      Policy = RISCVVType::TAIL_AGNOSTIC;

    Inserted =
        MIB.buildInstr(RISCV::G_VSLIDEUP_VL, {InsertedDst},
                       {AlignedExtract, Insert, SlideupAmt, Mask, VL, Policy});
  }

  // If required, insert this subvector back into the correct vector register.
  // This should resolve to an INSERT_SUBREG instruction.
  if (NeedInsertSubvec)
    MIB.buildInsertSubvector(Dst, BigVec, Inserted, AlignedIdx);

  MI.eraseFromParent();
  return true;
}

static unsigned getRISCVWOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected opcode");
  case TargetOpcode::G_ASHR:
    return RISCV::G_SRAW;
  case TargetOpcode::G_LSHR:
    return RISCV::G_SRLW;
  case TargetOpcode::G_SHL:
    return RISCV::G_SLLW;
  case TargetOpcode::G_SDIV:
    return RISCV::G_DIVW;
  case TargetOpcode::G_UDIV:
    return RISCV::G_DIVUW;
  case TargetOpcode::G_UREM:
    return RISCV::G_REMUW;
  case TargetOpcode::G_ROTL:
    return RISCV::G_ROLW;
  case TargetOpcode::G_ROTR:
    return RISCV::G_RORW;
  case TargetOpcode::G_CTLZ:
    return RISCV::G_CLZW;
  case TargetOpcode::G_CTTZ:
    return RISCV::G_CTZW;
  case TargetOpcode::G_FPTOSI:
    return RISCV::G_FCVT_W_RV64;
  case TargetOpcode::G_FPTOUI:
    return RISCV::G_FCVT_WU_RV64;
  }
}
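// The *W opcodes returned above read only the low 32 bits of their operands
// and sign-extend the 32-bit result to 64 bits. That is why the custom
// legalizations below may ANYEXT the sources (the upper bits are ignored)
// while the result is known to be sign-extended.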

bool RISCVLegalizerInfo::legalizeCustom(
    LegalizerHelper &Helper, MachineInstr &MI,
    LostDebugLocObserver &LocObserver) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  GISelChangeObserver &Observer = Helper.Observer;
  MachineFunction &MF = *MI.getParent()->getParent();
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  switch (MI.getOpcode()) {
  default:
    // No idea what to do.
    return false;
  case TargetOpcode::G_ABS:
    return Helper.lowerAbsToMaxNeg(MI) == LegalizerHelper::Legalized;
  case TargetOpcode::G_FCONSTANT: {
    // Convert the FP constant to an integer constant of the same bit width
    // and let the G_CONSTANT legalization materialize it.
    const APFloat &FVal = MI.getOperand(1).getFPImm()->getValueAPF();
    Register DstReg = MI.getOperand(0).getReg();
    MIRBuilder.buildConstant(DstReg, FVal.bitcastToAPInt());
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_CONSTANT: {
    const Function &F = MF.getFunction();
    // TODO: if PSI and BFI are present, add " ||
    // llvm::shouldOptForSize(*CurMBB, PSI, BFI)".
    bool ShouldOptForSize = F.hasOptSize();
    const ConstantInt *ConstVal = MI.getOperand(1).getCImm();
    if (!shouldBeInConstantPool(ConstVal->getValue(), ShouldOptForSize))
      return true;
    return Helper.lowerConstant(MI) == LegalizerHelper::Legalized;
  }
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_ADD: {
    Observer.changingInstr(MI);
    Helper.widenScalarSrc(MI, sXLen, 1, TargetOpcode::G_ANYEXT);
    Helper.widenScalarSrc(MI, sXLen, 2, TargetOpcode::G_ANYEXT);

    Register DstALU = MRI.createGenericVirtualRegister(sXLen);

    MachineOperand &MO = MI.getOperand(0);
    MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
    auto DstSext = MIRBuilder.buildSExtInReg(sXLen, DstALU, 32);

    MIRBuilder.buildInstr(TargetOpcode::G_TRUNC, {MO}, {DstSext});
    MO.setReg(DstALU);

    Observer.changedInstr(MI);
    return true;
  }
  case TargetOpcode::G_SEXT_INREG: {
    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
    int64_t SizeInBits = MI.getOperand(2).getImm();
    // Source size of 32 is sext.w.
    if (DstTy.getSizeInBits() == 64 && SizeInBits == 32)
      return true;

    if (STI.hasStdExtZbb() && (SizeInBits == 8 || SizeInBits == 16))
      return true;

    return Helper.lower(MI, 0, LLT()) ==
           LegalizerHelper::Legalized;
  }
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_SHL: {
    if (getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI)) {
      // We don't need a custom node for shift by constant. Just widen the
      // source and the shift amount.
      unsigned ExtOpc = TargetOpcode::G_ANYEXT;
      if (MI.getOpcode() == TargetOpcode::G_ASHR)
        ExtOpc = TargetOpcode::G_SEXT;
      else if (MI.getOpcode() == TargetOpcode::G_LSHR)
        ExtOpc = TargetOpcode::G_ZEXT;

      Observer.changingInstr(MI);
      Helper.widenScalarSrc(MI, sXLen, 1, ExtOpc);
      Helper.widenScalarSrc(MI, sXLen, 2, TargetOpcode::G_ZEXT);
      Helper.widenScalarDst(MI, sXLen);
      Observer.changedInstr(MI);
      return true;
    }

    Observer.changingInstr(MI);
    Helper.widenScalarSrc(MI, sXLen, 1, TargetOpcode::G_ANYEXT);
    Helper.widenScalarSrc(MI, sXLen, 2, TargetOpcode::G_ANYEXT);
    MI.setDesc(MIRBuilder.getTII().get(getRISCVWOpcode(MI.getOpcode())));
    Helper.widenScalarDst(MI, sXLen);
    Observer.changedInstr(MI);
    return true;
  }
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_ROTL:
  case TargetOpcode::G_ROTR: {
    Observer.changingInstr(MI);
    Helper.widenScalarSrc(MI, sXLen, 1, TargetOpcode::G_ANYEXT);
    Helper.widenScalarSrc(MI, sXLen, 2, TargetOpcode::G_ANYEXT);
    MI.setDesc(MIRBuilder.getTII().get(getRISCVWOpcode(MI.getOpcode())));
    Helper.widenScalarDst(MI, sXLen);
    Observer.changedInstr(MI);
    return true;
  }
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTTZ: {
    Observer.changingInstr(MI);
    Helper.widenScalarSrc(MI, sXLen, 1, TargetOpcode::G_ANYEXT);
    MI.setDesc(MIRBuilder.getTII().get(getRISCVWOpcode(MI.getOpcode())));
    Helper.widenScalarDst(MI, sXLen);
    Observer.changedInstr(MI);
    return true;
  }
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI: {
    Observer.changingInstr(MI);
    Helper.widenScalarDst(MI, sXLen);
    MI.setDesc(MIRBuilder.getTII().get(getRISCVWOpcode(MI.getOpcode())));
    MI.addOperand(MachineOperand::CreateImm(RISCVFPRndMode::RTZ));
    Observer.changedInstr(MI);
    return true;
  }
  case TargetOpcode::G_IS_FPCLASS: {
    Register GISFPCLASS = MI.getOperand(0).getReg();
    Register Src = MI.getOperand(1).getReg();
    const MachineOperand &ImmOp = MI.getOperand(2);
    MachineIRBuilder MIB(MI);

    // Turn LLVM IR's floating point classes to that in RISC-V,
    // by simply rotating the 10-bit immediate right by two bits.
    APInt GFpClassImm(10, static_cast<uint64_t>(ImmOp.getImm()));
    auto FClassMask = MIB.buildConstant(sXLen, GFpClassImm.rotr(2).zext(XLen));
    auto ConstZero = MIB.buildConstant(sXLen, 0);

    auto GFClass = MIB.buildInstr(RISCV::G_FCLASS, {sXLen}, {Src});
    auto And = MIB.buildAnd(sXLen, GFClass, FClassMask);
    MIB.buildICmp(CmpInst::ICMP_NE, GISFPCLASS, And, ConstZero);

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_BRJT:
    return legalizeBRJT(MI, MIRBuilder);
  case TargetOpcode::G_VASTART:
    return legalizeVAStart(MI, MIRBuilder);
  case TargetOpcode::G_VSCALE:
    return legalizeVScale(MI, MIRBuilder);
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ANYEXT:
    return legalizeExt(MI, MIRBuilder);
  case TargetOpcode::G_SPLAT_VECTOR:
    return legalizeSplatVector(MI, MIRBuilder);
  case TargetOpcode::G_EXTRACT_SUBVECTOR:
    return legalizeExtractSubvector(MI, MIRBuilder);
  case TargetOpcode::G_INSERT_SUBVECTOR:
    return legalizeInsertSubvector(MI, Helper, MIRBuilder);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
    return legalizeLoadStore(MI, Helper, MIRBuilder);
  }

  llvm_unreachable("expected switch to return");
}