[LoongArch] Allow f16 codegen with expansion to libcalls by xen0n · Pull Request #94456 · llvm/llvm-project
@llvm/pr-subscribers-backend-loongarch
Author: WÁNG Xuěruì (xen0n)
Changes
The test case is adapted from llvm/test/CodeGen/RISCV/fp16-promote.ll because it covers more of the IR patterns that ought to be common.
Fixes #93894
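To illustrate what this enables, here is a minimal sketch (not taken from the patch; the function names are made up): with the new f16 extload/truncstore and FP16_TO_FP/FP_TO_FP16 actions set to Expand, IR like the following now legalizes on LoongArch, with the conversions lowered to the __gnu_h2f_ieee / __gnu_f2h_ieee libcalls that the new test checks for.

; Hypothetical example, mirroring test_fpextend_float/test_fptrunc_float in the added test.
; Run with: llc --mtriple=loongarch64 --mattr=+d example.ll
define float @widen_half(ptr %p) nounwind {
  %h = load half, ptr %p              ; loaded as an i16 (ld.hu)
  %f = fpext half %h to float         ; expands to a call to __gnu_h2f_ieee
  ret float %f
}

define void @narrow_to_half(float %f, ptr %p) nounwind {
  %h = fptrunc float %f to half       ; expands to a call to __gnu_f2h_ieee
  store half %h, ptr %p               ; stored as an i16 (st.h)
  ret void
}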
Full diff: https://github.com/llvm/llvm-project/pull/94456.diff
2 Files Affected:
- (modified) llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp (+8)
- (added) llvm/test/CodeGen/LoongArch/fp16-promote.ll (+326)
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 51384f25d2455..7ab6640550b62 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -171,6 +171,8 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
   // Set operations for 'F' feature.
 
   if (Subtarget.hasBasicF()) {
+    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
+    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
     setCondCodeAction(FPCCToExpand, MVT::f32, Expand);
 
     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
@@ -186,6 +188,8 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
     setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
     setOperationAction(ISD::FPOW, MVT::f32, Expand);
     setOperationAction(ISD::FREM, MVT::f32, Expand);
+    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
+    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
 
     if (Subtarget.is64Bit())
       setOperationAction(ISD::FRINT, MVT::f32, Legal);
@@ -202,7 +206,9 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
   // Set operations for 'D' feature.
 
   if (Subtarget.hasBasicD()) {
+    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
+    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
     setCondCodeAction(FPCCToExpand, MVT::f64, Expand);
 
@@ -219,6 +225,8 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
     setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
     setOperationAction(ISD::FPOW, MVT::f64, Expand);
     setOperationAction(ISD::FREM, MVT::f64, Expand);
+    setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
+    setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
 
     if (Subtarget.is64Bit())
       setOperationAction(ISD::FRINT, MVT::f64, Legal);
diff --git a/llvm/test/CodeGen/LoongArch/fp16-promote.ll b/llvm/test/CodeGen/LoongArch/fp16-promote.ll
new file mode 100644
index 0000000000000..75f920b43a06c
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/fp16-promote.ll
@@ -0,0 +1,326 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
+
+define void @test_load_store(ptr %p, ptr %q) nounwind {
+; LA32-LABEL: test_load_store:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ld.h $a0, $a0, 0
+; LA32-NEXT:    st.h $a0, $a1, 0
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: test_load_store:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.h $a0, $a0, 0
+; LA64-NEXT:    st.h $a0, $a1, 0
+; LA64-NEXT:    ret
+  %a = load half, ptr %p
+  store half %a, ptr %q
+  ret void
+}
+
+define float @test_fpextend_float(ptr %p) nounwind {
+; LA32-LABEL: test_fpextend_float:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ld.hu $a0, $a0, 0
+; LA32-NEXT:    b %plt(__gnu_h2f_ieee)
+;
+; LA64-LABEL: test_fpextend_float:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.hu $a0, $a0, 0
+; LA64-NEXT:    b %plt(__gnu_h2f_ieee)
+  %a = load half, ptr %p
+  %r = fpext half %a to float
+  ret float %r
+}
+
+define double @test_fpextend_double(ptr %p) nounwind {
+; LA32-LABEL: test_fpextend_double:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    ld.hu $a0, $a0, 0
+; LA32-NEXT:    bl %plt(__gnu_h2f_ieee)
+; LA32-NEXT:    fcvt.d.s $fa0, $fa0
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: test_fpextend_double:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    ld.hu $a0, $a0, 0
+; LA64-NEXT:    bl %plt(__gnu_h2f_ieee)
+; LA64-NEXT:    fcvt.d.s $fa0, $fa0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+  %a = load half, ptr %p
+  %r = fpext half %a to double
+  ret double %r
+}
+
+define void @test_fptrunc_float(float %f, ptr %p) nounwind {
+; LA32-LABEL: test_fptrunc_float:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    bl %plt(__gnu_f2h_ieee)
+; LA32-NEXT:    st.h $a0, $fp, 0
+; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: test_fptrunc_float:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64-NEXT:    move $fp, $a0
+; LA64-NEXT:    bl %plt(__gnu_f2h_ieee)
+; LA64-NEXT:    st.h $a0, $fp, 0
+; LA64-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+  %a = fptrunc float %f to half
+  store half %a, ptr %p
+  ret void
+}
+
+define void @test_fptrunc_double(double %d, ptr %p) nounwind {
+; LA32-LABEL: test_fptrunc_double:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32-NEXT:    move $fp, $a0
+; LA32-NEXT:    bl %plt(__truncdfhf2)
+; LA32-NEXT:    st.h $a0, $fp, 0
+; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: test_fptrunc_double:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64-NEXT:    move $fp, $a0
+; LA64-NEXT:    bl %plt(__truncdfhf2)
+; LA64-NEXT:    st.h $a0, $fp, 0
+; LA64-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+  %a = fptrunc double %d to half
+  store half %a, ptr %p
+  ret void
+}
+
+define half @test_fadd_reg(half %a, half %b) nounwind {
+; LA32-LABEL: test_fadd_reg:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA32-NEXT:    fmov.s $fs0, $fa0
+; LA32-NEXT:    fmov.s $fa0, $fa1
+; LA32-NEXT:    bl %plt(__gnu_f2h_ieee)
+; LA32-NEXT:    bl %plt(__gnu_h2f_ieee)
+; LA32-NEXT:    fmov.s $fs1, $fa0
+; LA32-NEXT:    fmov.s $fa0, $fs0
+; LA32-NEXT:    bl %plt(__gnu_f2h_ieee)
+; LA32-NEXT:    bl %plt(__gnu_h2f_ieee)
+; LA32-NEXT:    fadd.s $fa0, $fa0, $fs1
+; LA32-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: test_fadd_reg:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -32
+; LA64-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    fmov.s $fs0, $fa0
+; LA64-NEXT:    fmov.s $fa0, $fa1
+; LA64-NEXT:    bl %plt(__gnu_f2h_ieee)
+; LA64-NEXT:    bl %plt(__gnu_h2f_ieee)
+; LA64-NEXT:    fmov.s $fs1, $fa0
+; LA64-NEXT:    fmov.s $fa0, $fs0
+; LA64-NEXT:    bl %plt(__gnu_f2h_ieee)
+; LA64-NEXT:    bl %plt(__gnu_h2f_ieee)
+; LA64-NEXT:    fadd.s $fa0, $fa0, $fs1
+; LA64-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA64-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 32
+; LA64-NEXT:    ret
+  %r = fadd half %a, %b
+  ret half %r
+}
+
+define void @test_fadd_mem(ptr %p, ptr %q) nounwind {
+; LA32-LABEL: test_fadd_mem:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT:    fst.d $fs0, $sp, 8 # 8-byte Folded Spill
+; LA32-NEXT:    move $fp, $a1
+; LA32-NEXT:    move $s0, $a0
+; LA32-NEXT:    ld.hu $a0, $a0, 0
+; LA32-NEXT:    bl %plt(__gnu_h2f_ieee)
+; LA32-NEXT:    fmov.s $fs0, $fa0
+; LA32-NEXT:    ld.hu $a0, $fp, 0
+; LA32-NEXT:    bl %plt(__gnu_h2f_ieee)
+; LA32-NEXT:    fadd.s $fa0, $fs0, $fa0
+; LA32-NEXT:    bl %plt(__gnu_f2h_ieee)
+; LA32-NEXT:    st.h $a0, $s0, 0
+; LA32-NEXT:    fld.d $fs0, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: test_fadd_mem:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -32
+; LA64-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
+; LA64-NEXT:    st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    fst.d $fs0, $sp, 0 # 8-byte Folded Spill
+; LA64-NEXT:    move $fp, $a1
+; LA64-NEXT:    move $s0, $a0
+; LA64-NEXT:    ld.hu $a0, $a0, 0
+; LA64-NEXT:    bl %plt(__gnu_h2f_ieee)
+; LA64-NEXT:    fmov.s $fs0, $fa0
+; LA64-NEXT:    ld.hu $a0, $fp, 0
+; LA64-NEXT:    bl %plt(__gnu_h2f_ieee)
+; LA64-NEXT:    fadd.s $fa0, $fs0, $fa0
+; LA64-NEXT:    bl %plt(__gnu_f2h_ieee)
+; LA64-NEXT:    st.h $a0, $s0, 0
+; LA64-NEXT:    fld.d $fs0, $sp, 0 # 8-byte Folded Reload
+; LA64-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; LA64-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 32
+; LA64-NEXT:    ret
+  %a = load half, ptr %p
+  %b = load half, ptr %q
+  %r = fadd half %a, %b
+  store half %r, ptr %p
+  ret void
+}
+
+define half @test_fmul_reg(half %a, half %b) nounwind {
+; LA32-LABEL: test_fmul_reg:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA32-NEXT:    fmov.s $fs0, $fa0
+; LA32-NEXT:    fmov.s $fa0, $fa1
+; LA32-NEXT:    bl %plt(__gnu_f2h_ieee)
+; LA32-NEXT:    bl %plt(__gnu_h2f_ieee)
+; LA32-NEXT:    fmov.s $fs1, $fa0
+; LA32-NEXT:    fmov.s $fa0, $fs0
+; LA32-NEXT:    bl %plt(__gnu_f2h_ieee)
+; LA32-NEXT:    bl %plt(__gnu_h2f_ieee)
+; LA32-NEXT:    fmul.s $fa0, $fa0, $fs1
+; LA32-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: test_fmul_reg:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -32
+; LA64-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA64-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    fmov.s $fs0, $fa0
+; LA64-NEXT:    fmov.s $fa0, $fa1
+; LA64-NEXT:    bl %plt(__gnu_f2h_ieee)
+; LA64-NEXT:    bl %plt(__gnu_h2f_ieee)
+; LA64-NEXT:    fmov.s $fs1, $fa0
+; LA64-NEXT:    fmov.s $fa0, $fs0
+; LA64-NEXT:    bl %plt(__gnu_f2h_ieee)
+; LA64-NEXT:    bl %plt(__gnu_h2f_ieee)
+; LA64-NEXT:    fmul.s $fa0, $fa0, $fs1
+; LA64-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA64-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 32
+; LA64-NEXT:    ret
+  %r = fmul half %a, %b
+  ret half %r
+}
+
+define void @test_fmul_mem(ptr %p, ptr %q) nounwind {
+; LA32-LABEL: test_fmul_mem:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 20 # 4-byte Folded Spill
+; LA32-NEXT:    fst.d $fs0, $sp, 8 # 8-byte Folded Spill
+; LA32-NEXT:    move $fp, $a1
+; LA32-NEXT:    move $s0, $a0
+; LA32-NEXT:    ld.hu $a0, $a0, 0
+; LA32-NEXT:    bl %plt(__gnu_h2f_ieee)
+; LA32-NEXT:    fmov.s $fs0, $fa0
+; LA32-NEXT:    ld.hu $a0, $fp, 0
+; LA32-NEXT:    bl %plt(__gnu_h2f_ieee)
+; LA32-NEXT:    fmul.s $fa0, $fs0, $fa0
+; LA32-NEXT:    bl %plt(__gnu_f2h_ieee)
+; LA32-NEXT:    st.h $a0, $s0, 0
+; LA32-NEXT:    fld.d $fs0, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT:    ld.w $s0, $sp, 20 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: test_fmul_mem:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -32
+; LA64-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
+; LA64-NEXT:    st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    fst.d $fs0, $sp, 0 # 8-byte Folded Spill
+; LA64-NEXT:    move $fp, $a1
+; LA64-NEXT:    move $s0, $a0
+; LA64-NEXT:    ld.hu $a0, $a0, 0
+; LA64-NEXT:    bl %plt(__gnu_h2f_ieee)
+; LA64-NEXT:    fmov.s $fs0, $fa0
+; LA64-NEXT:    ld.hu $a0, $fp, 0
+; LA64-NEXT:    bl %plt(__gnu_h2f_ieee)
+; LA64-NEXT:    fmul.s $fa0, $fs0, $fa0
+; LA64-NEXT:    bl %plt(__gnu_f2h_ieee)
+; LA64-NEXT:    st.h $a0, $s0, 0
+; LA64-NEXT:    fld.d $fs0, $sp, 0 # 8-byte Folded Reload
+; LA64-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; LA64-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 32
+; LA64-NEXT:    ret
+  %a = load half, ptr %p
+  %b = load half, ptr %q
+  %r = fmul half %a, %b
+  store half %r, ptr %p
+  ret void
+}