[RISCV] Add intrinsics for vslide1up/down, vfslide1up/down instructions

This patch adds intrinsics for vslide1up, vslide1down, vfslide1up and
vfslide1down.

Authored-by: Roger Ferrer Ibanez <rofirrim@gmail.com>
Co-Authored-by: ShihPo Hung <shihpo.hung@sifive.com>

Differential Revision: https://reviews.llvm.org/D93608
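For reference, vslide1up/vfslide1up shift every source element up by one position and place the scalar operand in element 0, while vslide1down/vfslide1down shift elements down by one and place the scalar in the last active element (section 17.3 of the vector spec). Below is a minimal sketch, not part of the patch, of calling the new unmasked intrinsic from LLVM IR; the declaration mirrors the tests further down, and the wrapper name slide1up_example is illustrative only:

declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
  <vscale x 1 x half>, half, i32)

define <vscale x 1 x half> @slide1up_example(<vscale x 1 x half> %v, half %x, i32 %vl) {
  ; Element 0 of the result receives %x; element i+1 receives element i of %v.
  %r = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
    <vscale x 1 x half> %v, half %x, i32 %vl)
  ret <vscale x 1 x half> %r
}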
@@ -535,6 +535,11 @@ let TargetPrefix = "riscv" in {
   defm vslideup : RISCVTernaryAAAX;
   defm vslidedown : RISCVTernaryAAAX;
 
+  defm vslide1up : RISCVBinaryAAX;
+  defm vslide1down : RISCVBinaryAAX;
+  defm vfslide1up : RISCVBinaryAAX;
+  defm vfslide1down : RISCVBinaryAAX;
+
   defm vaaddu : RISCVSaturatingBinaryAAX;
   defm vaadd : RISCVSaturatingBinaryAAX;
   defm vasubu : RISCVSaturatingBinaryAAX;
@@ -554,5 +559,4 @@ let TargetPrefix = "riscv" in {
   defm vmfle : RISCVCompare;
   defm vmfgt : RISCVCompare;
   defm vmfge : RISCVCompare;
-
 } // TargetPrefix = "riscv"
@@ -673,10 +673,10 @@ multiclass VPseudoBinaryV_VV {
     defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m>;
 }
 
-multiclass VPseudoBinaryV_VX<bit IsFloat> {
+multiclass VPseudoBinaryV_VX<bit IsFloat, string Constraint = ""> {
   foreach m = MxList.m in
     defm !if(IsFloat, "_VF", "_VX") : VPseudoBinary<m.vrclass, m.vrclass,
-                                                    !if(IsFloat, FPR32, GPR), m>;
+                                                    !if(IsFloat, FPR32, GPR), m, Constraint>;
 }
 
 multiclass VPseudoBinaryV_VI<Operand ImmType = simm5> {
@@ -1954,8 +1954,17 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1,
 //===----------------------------------------------------------------------===//
 // 17.3. Vector Slide Instructions
 //===----------------------------------------------------------------------===//
-defm PseudoVSLIDEUP : VPseudoTernaryV_VX_VI<uimm5, "@earlyclobber $rd">;
-defm PseudoVSLIDEDOWN : VPseudoTernaryV_VX_VI<uimm5>;
+let Predicates = [HasStdExtV] in {
+  defm PseudoVSLIDEUP : VPseudoTernaryV_VX_VI<uimm5, "@earlyclobber $rd">;
+  defm PseudoVSLIDEDOWN : VPseudoTernaryV_VX_VI<uimm5>;
+  defm PseudoVSLIDE1UP : VPseudoBinaryV_VX</*IsFloat*/false, "@earlyclobber $rd">;
+  defm PseudoVSLIDE1DOWN : VPseudoBinaryV_VX</*IsFloat*/false>;
+} // Predicates = [HasStdExtV]
+
+let Predicates = [HasStdExtV, HasStdExtF] in {
+  defm PseudoVFSLIDE1UP : VPseudoBinaryV_VX</*IsFloat*/true, "@earlyclobber $rd">;
+  defm PseudoVFSLIDE1DOWN : VPseudoBinaryV_VX</*IsFloat*/true>;
+} // Predicates = [HasStdExtV, HasStdExtF]
 
 //===----------------------------------------------------------------------===//
 // Patterns.
@@ -2323,9 +2332,13 @@ foreach fvti = AllFloatVectors in {
 let Predicates = [HasStdExtV] in {
   defm "" : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllIntegerVectors, uimm5>;
   defm "" : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllIntegerVectors, uimm5>;
+  defm "" : VPatBinaryV_VX<"int_riscv_vslide1up", "PseudoVSLIDE1UP", AllIntegerVectors>;
+  defm "" : VPatBinaryV_VX<"int_riscv_vslide1down", "PseudoVSLIDE1DOWN", AllIntegerVectors>;
 } // Predicates = [HasStdExtV]
 
 let Predicates = [HasStdExtV, HasStdExtF] in {
   defm "" : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllFloatVectors, uimm5>;
   defm "" : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllFloatVectors, uimm5>;
+  defm "" : VPatBinaryV_VX<"int_riscv_vfslide1up", "PseudoVFSLIDE1UP", AllFloatVectors>;
+  defm "" : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN", AllFloatVectors>;
 } // Predicates = [HasStdExtV, HasStdExtF]
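The masked variants take the merge vector, the source vector, the scalar, the mask, and the vl, in that order, as the declarations in the new tests below show. A hedged sketch of a masked call, assuming the usual merge semantics of these masked intrinsics (inactive lanes keep the merge operand's value); the wrapper name is again illustrative:

declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
  <vscale x 1 x half>, <vscale x 1 x half>, half, <vscale x 1 x i1>, i32)

define <vscale x 1 x half> @slide1up_masked_example(<vscale x 1 x half> %merge, <vscale x 1 x half> %v, half %x, <vscale x 1 x i1> %mask, i32 %vl) {
  ; Lanes where %mask is 0 keep the corresponding element of %merge.
  %r = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
    <vscale x 1 x half> %merge, <vscale x 1 x half> %v, half %x,
    <vscale x 1 x i1> %mask, i32 %vl)
  ret <vscale x 1 x half> %r
}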
@@ -0,0 +1,512 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
; RUN:   --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
  <vscale x 1 x half>,
  half,
  i32);

define <vscale x 1 x half> @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
    <vscale x 1 x half> %0,
    half %1,
    i32 %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x half> @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v17, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
  <vscale x 2 x half>,
  half,
  i32);

define <vscale x 2 x half> @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
    <vscale x 2 x half> %0,
    half %1,
    i32 %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x half> @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v17, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    half %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
  <vscale x 4 x half>,
  half,
  i32);

define <vscale x 4 x half> @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
    <vscale x 4 x half> %0,
    half %1,
    i32 %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x half> @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v17, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16(
  <vscale x 8 x half>,
  half,
  i32);

define <vscale x 8 x half> @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16(
    <vscale x 8 x half> %0,
    half %1,
    i32 %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfslide1down.mask.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x half> @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v18, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.mask.nxv8f16.f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16(
  <vscale x 16 x half>,
  half,
  i32);

define <vscale x 16 x half> @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16(
    <vscale x 16 x half> %0,
    half %1,
    i32 %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfslide1down.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x half> @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v20, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16(
  <vscale x 32 x half>,
  half,
  i32);

define <vscale x 32 x half> @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16(
    <vscale x 32 x half> %0,
    half %1,
    i32 %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfslide1down.mask.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x half> @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    fmv.h.x ft0, a1
; CHECK-NEXT:    vsetvli a0, a2, e16,m8,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v8, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1down.mask.nxv32f16.f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    half %2,
    <vscale x 32 x i1> %3,
    i32 %4)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32(
  <vscale x 1 x float>,
  float,
  i32);

define <vscale x 1 x float> @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32(
    <vscale x 1 x float> %0,
    float %1,
    i32 %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x float> @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v17, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32(
  <vscale x 2 x float>,
  float,
  i32);

define <vscale x 2 x float> @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32(
    <vscale x 2 x float> %0,
    float %1,
    i32 %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x float> @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v17, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32(
  <vscale x 4 x float>,
  float,
  i32);

define <vscale x 4 x float> @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32(
    <vscale x 4 x float> %0,
    float %1,
    i32 %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x float> @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v18, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32(
  <vscale x 8 x float>,
  float,
  i32);

define <vscale x 8 x float> @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32(
    <vscale x 8 x float> %0,
    float %1,
    i32 %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x float> @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v20, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32(
  <vscale x 16 x float>,
  float,
  i32);

define <vscale x 16 x float> @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32(
    <vscale x 16 x float> %0,
    float %1,
    i32 %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x float> @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    fmv.w.x ft0, a1
; CHECK-NEXT:    vsetvli a0, a2, e32,m8,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v8, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    float %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x float> %a
}

@@ -0,0 +1,698 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN:   --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
  <vscale x 1 x half>,
  half,
  i64);

define <vscale x 1 x half> @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
    <vscale x 1 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x half> @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v17, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
  <vscale x 2 x half>,
  half,
  i64);

define <vscale x 2 x half> @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
    <vscale x 2 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x half> @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v17, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    half %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
  <vscale x 4 x half>,
  half,
  i64);

define <vscale x 4 x half> @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
    <vscale x 4 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x half> @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v17, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16(
  <vscale x 8 x half>,
  half,
  i64);

define <vscale x 8 x half> @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16(
    <vscale x 8 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfslide1down.mask.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x half> @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v18, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.mask.nxv8f16.f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16(
  <vscale x 16 x half>,
  half,
  i64);

define <vscale x 16 x half> @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16(
    <vscale x 16 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfslide1down.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x half> @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v20, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16(
  <vscale x 32 x half>,
  half,
  i64);

define <vscale x 32 x half> @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16(
    <vscale x 32 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfslide1down.mask.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  <vscale x 32 x i1>,
  i64);

define <vscale x 32 x half> @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    fmv.h.x ft0, a1
; CHECK-NEXT:    vsetvli a0, a2, e16,m8,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v8, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1down.mask.nxv32f16.f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    half %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32(
  <vscale x 1 x float>,
  float,
  i64);

define <vscale x 1 x float> @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32(
    <vscale x 1 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x float> @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v17, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32(
  <vscale x 2 x float>,
  float,
  i64);

define <vscale x 2 x float> @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32(
    <vscale x 2 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x float> @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v17, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32(
  <vscale x 4 x float>,
  float,
  i64);

define <vscale x 4 x float> @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32(
    <vscale x 4 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x float> @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v18, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32(
  <vscale x 8 x float>,
  float,
  i64);

define <vscale x 8 x float> @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32(
    <vscale x 8 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x float> @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v20, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32(
  <vscale x 16 x float>,
  float,
  i64);

define <vscale x 16 x float> @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32(
    <vscale x 16 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x float> @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    fmv.w.x ft0, a1
; CHECK-NEXT:    vsetvli a0, a2, e32,m8,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v8, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    float %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64(
  <vscale x 1 x double>,
  double,
  i64);

define <vscale x 1 x double> @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64(
    <vscale x 1 x double> %0,
    double %1,
    i64 %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x double> @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v17, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    double %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64(
  <vscale x 2 x double>,
  double,
  i64);

define <vscale x 2 x double> @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64(
    <vscale x 2 x double> %0,
    double %1,
    i64 %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x double> @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v18, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    double %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
  <vscale x 4 x double>,
  double,
  i64);

define <vscale x 4 x double> @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
    <vscale x 4 x double> %0,
    double %1,
    i64 %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x double> @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v20, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    double %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
  <vscale x 8 x double>,
  double,
  i64);

define <vscale x 8 x double> @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v16, ft0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
    <vscale x 8 x double> %0,
    double %1,
    i64 %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x double> @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    fmv.d.x ft0, a1
; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
; CHECK-NEXT:    vfslide1down.vf v16, v8, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    double %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x double> %a
}

@ -0,0 +1,523 @@
|
|||
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
|
||||
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
|
||||
; RUN: --riscv-no-aliases < %s | FileCheck %s
|
||||
declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
|
||||
<vscale x 1 x half>,
|
||||
half,
|
||||
i32);
|
||||
|
||||
define <vscale x 1 x half> @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
|
||||
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16:
|
||||
; CHECK: # %bb.0: # %entry
|
||||
; CHECK-NEXT: fmv.h.x ft0, a0
|
||||
; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
|
||||
; CHECK-NEXT: vfslide1up.vf v25, v16, ft0
|
||||
; CHECK-NEXT: vmv1r.v v16, v25
|
||||
; CHECK-NEXT: jalr zero, 0(ra)
|
||||
entry:
|
||||
%a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
|
||||
<vscale x 1 x half> %0,
|
||||
half %1,
|
||||
i32 %2)
|
||||
|
||||
ret <vscale x 1 x half> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
|
||||
<vscale x 1 x half>,
|
||||
<vscale x 1 x half>,
|
||||
half,
|
||||
<vscale x 1 x i1>,
|
||||
i32);
|
||||
|
||||
define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
|
||||
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
|
||||
; CHECK: # %bb.0: # %entry
|
||||
; CHECK-NEXT: fmv.h.x ft0, a0
|
||||
; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
|
||||
; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t
|
||||
; CHECK-NEXT: jalr zero, 0(ra)
|
||||
entry:
|
||||
%a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
|
||||
<vscale x 1 x half> %0,
|
||||
<vscale x 1 x half> %1,
|
||||
half %2,
|
||||
<vscale x 1 x i1> %3,
|
||||
i32 %4)
|
||||
|
||||
ret <vscale x 1 x half> %a
|
||||
}
|
||||
|
||||
declare <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
|
||||
<vscale x 2 x half>,
|
||||
half,
|
||||
i32);
|
||||
|
||||
define <vscale x 2 x half> @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
|
||||
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16:
|
||||
; CHECK: # %bb.0: # %entry
|
||||
; CHECK-NEXT: fmv.h.x ft0, a0
|
||||
; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
|
||||
; CHECK-NEXT: vfslide1up.vf v25, v16, ft0
|
||||
; CHECK-NEXT: vmv1r.v v16, v25
|
||||
; CHECK-NEXT: jalr zero, 0(ra)
|
||||
entry:
|
||||
%a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
|
||||
<vscale x 2 x half> %0,
|
||||
half %1,
|
||||
i32 %2)
|
||||
|
||||
ret <vscale x 2 x half> %a
|
||||
}
|
||||
|
||||
declare <vscale x 2 x half> @llvm.riscv.vfslide1up.mask.nxv2f16.f16(
|
||||
<vscale x 2 x half>,
|
||||
<vscale x 2 x half>,
|
||||
half,
|
||||
<vscale x 2 x i1>,
|
||||
i32);
|
||||
|
||||
define <vscale x 2 x half> @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
|
||||
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16:
|
||||
; CHECK: # %bb.0: # %entry
|
||||
; CHECK-NEXT: fmv.h.x ft0, a0
|
||||
; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
|
||||
; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t
|
||||
; CHECK-NEXT: jalr zero, 0(ra)
|
||||
entry:
|
||||
%a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.mask.nxv2f16.f16(
|
||||
<vscale x 2 x half> %0,
|
||||
<vscale x 2 x half> %1,
|
||||
half %2,
|
||||
<vscale x 2 x i1> %3,
|
||||
i32 %4)
|
||||
|
||||
ret <vscale x 2 x half> %a
|
||||
}
|
||||
|
||||
declare <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16(
|
||||
<vscale x 4 x half>,
|
||||
half,
|
||||
i32);
|
||||
|
||||
define <vscale x 4 x half> @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
; CHECK-NEXT:    vfslide1up.vf v25, v16, ft0
; CHECK-NEXT:    vmv1r.v v16, v25
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16(
    <vscale x 4 x half> %0,
    half %1,
    i32 %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfslide1up.mask.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x half> @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v17, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1up.mask.nxv4f16.f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16(
  <vscale x 8 x half>,
  half,
  i32);

define <vscale x 8 x half> @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
; CHECK-NEXT:    vfslide1up.vf v26, v16, ft0
; CHECK-NEXT:    vmv2r.v v16, v26
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16(
    <vscale x 8 x half> %0,
    half %1,
    i32 %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfslide1up.mask.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x half> @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v18, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1up.mask.nxv8f16.f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16(
  <vscale x 16 x half>,
  half,
  i32);

define <vscale x 16 x half> @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
; CHECK-NEXT:    vfslide1up.vf v28, v16, ft0
; CHECK-NEXT:    vmv4r.v v16, v28
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16(
    <vscale x 16 x half> %0,
    half %1,
    i32 %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfslide1up.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x half> @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v20, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1up.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16(
  <vscale x 32 x half>,
  half,
  i32);

define <vscale x 32 x half> @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
; CHECK-NEXT:    vfslide1up.vf v8, v16, ft0
; CHECK-NEXT:    vmv8r.v v16, v8
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16(
    <vscale x 32 x half> %0,
    half %1,
    i32 %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfslide1up.mask.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  <vscale x 32 x i1>,
  i32);
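; In the masked LMUL=8 cases below, the maskedoff operand already occupies the
; vector argument registers, so the second vector operand appears to be passed
; indirectly and is reloaded (vle16.v/vle32.v from a0) before the slide.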
define <vscale x 32 x half> @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    fmv.h.x ft0, a1
; CHECK-NEXT:    vsetvli a0, a2, e16,m8,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1up.mask.nxv32f16.f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    half %2,
    <vscale x 32 x i1> %3,
    i32 %4)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32(
  <vscale x 1 x float>,
  float,
  i32);

define <vscale x 1 x float> @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
; CHECK-NEXT:    vfslide1up.vf v25, v16, ft0
; CHECK-NEXT:    vmv1r.v v16, v25
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32(
    <vscale x 1 x float> %0,
    float %1,
    i32 %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x float> @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v17, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32(
  <vscale x 2 x float>,
  float,
  i32);

define <vscale x 2 x float> @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
; CHECK-NEXT:    vfslide1up.vf v25, v16, ft0
; CHECK-NEXT:    vmv1r.v v16, v25
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32(
    <vscale x 2 x float> %0,
    float %1,
    i32 %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x float> @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v17, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32(
  <vscale x 4 x float>,
  float,
  i32);

define <vscale x 4 x float> @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
; CHECK-NEXT:    vfslide1up.vf v26, v16, ft0
; CHECK-NEXT:    vmv2r.v v16, v26
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32(
    <vscale x 4 x float> %0,
    float %1,
    i32 %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x float> @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v18, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32(
  <vscale x 8 x float>,
  float,
  i32);

define <vscale x 8 x float> @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
; CHECK-NEXT:    vfslide1up.vf v28, v16, ft0
; CHECK-NEXT:    vmv4r.v v16, v28
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32(
    <vscale x 8 x float> %0,
    float %1,
    i32 %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x float> @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v20, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32(
  <vscale x 16 x float>,
  float,
  i32);

define <vscale x 16 x float> @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
; CHECK-NEXT:    vfslide1up.vf v8, v16, ft0
; CHECK-NEXT:    vmv8r.v v16, v8
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32(
    <vscale x 16 x float> %0,
    float %1,
    i32 %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x float> @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    fmv.w.x ft0, a1
; CHECK-NEXT:    vsetvli a0, a2, e32,m8,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    float %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x float> %a
}
@ -0,0 +1,713 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN:   --riscv-no-aliases < %s | FileCheck %s
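; vfslide1up.vf vd, vs2, rs1 writes the FP scalar rs1 to element 0 and sets
; vd[i] = vs2[i-1] for the remaining body elements, so the destination may not
; overlap the source; the pseudos carry an @earlyclobber constraint, which is
; why the unmasked checks below slide into a scratch register and copy back
; with vmv1r.v/vmv2r.v/vmv4r.v/vmv8r.v.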
declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
  <vscale x 1 x half>,
  half,
  i64);

define <vscale x 1 x half> @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
; CHECK-NEXT:    vfslide1up.vf v25, v16, ft0
; CHECK-NEXT:    vmv1r.v v16, v25
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
    <vscale x 1 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,mf4,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v17, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
  <vscale x 2 x half>,
  half,
  i64);

define <vscale x 2 x half> @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
; CHECK-NEXT:    vfslide1up.vf v25, v16, ft0
; CHECK-NEXT:    vmv1r.v v16, v25
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
    <vscale x 2 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfslide1up.mask.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x half> @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,mf2,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v17, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.mask.nxv2f16.f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    half %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16(
  <vscale x 4 x half>,
  half,
  i64);

define <vscale x 4 x half> @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
; CHECK-NEXT:    vfslide1up.vf v25, v16, ft0
; CHECK-NEXT:    vmv1r.v v16, v25
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16(
    <vscale x 4 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfslide1up.mask.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x half> @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m1,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v17, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1up.mask.nxv4f16.f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16(
  <vscale x 8 x half>,
  half,
  i64);

define <vscale x 8 x half> @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
; CHECK-NEXT:    vfslide1up.vf v26, v16, ft0
; CHECK-NEXT:    vmv2r.v v16, v26
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16(
    <vscale x 8 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfslide1up.mask.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x half> @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m2,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v18, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1up.mask.nxv8f16.f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16(
  <vscale x 16 x half>,
  half,
  i64);

define <vscale x 16 x half> @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
; CHECK-NEXT:    vfslide1up.vf v28, v16, ft0
; CHECK-NEXT:    vmv4r.v v16, v28
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16(
    <vscale x 16 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfslide1up.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x half> @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v20, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1up.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16(
  <vscale x 32 x half>,
  half,
  i64);

define <vscale x 32 x half> @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
; CHECK-NEXT:    vfslide1up.vf v8, v16, ft0
; CHECK-NEXT:    vmv8r.v v16, v8
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16(
    <vscale x 32 x half> %0,
    half %1,
    i64 %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfslide1up.mask.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  <vscale x 32 x i1>,
  i64);

define <vscale x 32 x half> @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    fmv.h.x ft0, a1
; CHECK-NEXT:    vsetvli a0, a2, e16,m8,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1up.mask.nxv32f16.f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    half %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32(
  <vscale x 1 x float>,
  float,
  i64);

define <vscale x 1 x float> @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
; CHECK-NEXT:    vfslide1up.vf v25, v16, ft0
; CHECK-NEXT:    vmv1r.v v16, v25
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32(
    <vscale x 1 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x float> @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,mf2,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v17, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32(
  <vscale x 2 x float>,
  float,
  i64);

define <vscale x 2 x float> @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
; CHECK-NEXT:    vfslide1up.vf v25, v16, ft0
; CHECK-NEXT:    vmv1r.v v16, v25
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32(
    <vscale x 2 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x float> @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m1,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v17, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32(
  <vscale x 4 x float>,
  float,
  i64);

define <vscale x 4 x float> @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
; CHECK-NEXT:    vfslide1up.vf v26, v16, ft0
; CHECK-NEXT:    vmv2r.v v16, v26
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32(
    <vscale x 4 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x float> @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m2,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v18, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32(
  <vscale x 8 x float>,
  float,
  i64);

define <vscale x 8 x float> @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
; CHECK-NEXT:    vfslide1up.vf v28, v16, ft0
; CHECK-NEXT:    vmv4r.v v16, v28
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32(
    <vscale x 8 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x float> @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m4,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v20, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32(
  <vscale x 16 x float>,
  float,
  i64);

define <vscale x 16 x float> @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
; CHECK-NEXT:    vfslide1up.vf v8, v16, ft0
; CHECK-NEXT:    vmv8r.v v16, v8
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32(
    <vscale x 16 x float> %0,
    float %1,
    i64 %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x float> @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    fmv.w.x ft0, a1
; CHECK-NEXT:    vsetvli a0, a2, e32,m8,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    float %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64(
  <vscale x 1 x double>,
  double,
  i64);

define <vscale x 1 x double> @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
; CHECK-NEXT:    vfslide1up.vf v25, v16, ft0
; CHECK-NEXT:    vmv1r.v v16, v25
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64(
    <vscale x 1 x double> %0,
    double %1,
    i64 %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x double> @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e64,m1,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v17, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    double %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64(
  <vscale x 2 x double>,
  double,
  i64);

define <vscale x 2 x double> @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
; CHECK-NEXT:    vfslide1up.vf v26, v16, ft0
; CHECK-NEXT:    vmv2r.v v16, v26
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64(
    <vscale x 2 x double> %0,
    double %1,
    i64 %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x double> @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v18, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    double %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64(
  <vscale x 4 x double>,
  double,
  i64);

define <vscale x 4 x double> @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
; CHECK-NEXT:    vfslide1up.vf v28, v16, ft0
; CHECK-NEXT:    vmv4r.v v16, v28
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64(
    <vscale x 4 x double> %0,
    double %1,
    i64 %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x double> @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v20, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    double %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
  <vscale x 8 x double>,
  double,
  i64);

define <vscale x 8 x double> @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.d.x ft0, a0
; CHECK-NEXT:    vsetvli a0, a1, e64,m8,ta,mu
; CHECK-NEXT:    vfslide1up.vf v8, v16, ft0
; CHECK-NEXT:    vmv8r.v v16, v8
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
    <vscale x 8 x double> %0,
    double %1,
    i64 %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x double> @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    fmv.d.x ft0, a1
; CHECK-NEXT:    vsetvli a0, a2, e64,m8,ta,mu
; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    double %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x double> %a
}
@ -0,0 +1,800 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
; RUN:   --riscv-no-aliases < %s | FileCheck %s
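; vslide1down.vx vd, vs2, rs1 sets vd[i] = vs2[i+1] for i < vl-1 and writes the
; GPR scalar rs1, truncated to SEW, to element vl-1. Sliding down reads ahead
; of the write position, so an in-place update is safe and no earlyclobber
; constraint is needed; the unmasked checks below reuse the same register for
; source and destination (vslide1down.vx v16, v16, a0).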
declare <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8(
|
||||
<vscale x 1 x i8>,
|
||||
i8,
|
||||
i32);
|
||||
|
||||
define <vscale x 1 x i8> @intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
|
||||
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8:
|
||||
; CHECK: # %bb.0: # %entry
|
||||
; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
|
||||
; CHECK-NEXT: vslide1down.vx v16, v16, a0
|
||||
; CHECK-NEXT: jalr zero, 0(ra)
|
||||
entry:
|
||||
%a = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8(
|
||||
<vscale x 1 x i8> %0,
|
||||
i8 %1,
|
||||
i32 %2)
|
||||
|
||||
ret <vscale x 1 x i8> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8(
|
||||
<vscale x 1 x i8>,
|
||||
<vscale x 1 x i8>,
|
||||
i8,
|
||||
<vscale x 1 x i1>,
|
||||
i32);
|
||||
|
||||
define <vscale x 1 x i8> @intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
|
||||
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8:
|
||||
; CHECK: # %bb.0: # %entry
|
||||
; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
|
||||
; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t
|
||||
; CHECK-NEXT: jalr zero, 0(ra)
|
||||
entry:
|
||||
%a = call <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8(
|
||||
<vscale x 1 x i8> %0,
|
||||
<vscale x 1 x i8> %1,
|
||||
i8 %2,
|
||||
<vscale x 1 x i1> %3,
|
||||
i32 %4)
|
||||
|
||||
ret <vscale x 1 x i8> %a
|
||||
}
|
||||
|
||||
declare <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8(
|
||||
<vscale x 2 x i8>,
|
||||
i8,
|
||||
i32);
|
||||
|
||||
define <vscale x 2 x i8> @intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
|
||||
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8:
|
||||
; CHECK: # %bb.0: # %entry
|
||||
; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
|
||||
; CHECK-NEXT: vslide1down.vx v16, v16, a0
|
||||
; CHECK-NEXT: jalr zero, 0(ra)
|
||||
entry:
|
||||
%a = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8(
|
||||
<vscale x 2 x i8> %0,
|
||||
i8 %1,
|
||||
i32 %2)
|
||||
|
||||
ret <vscale x 2 x i8> %a
|
||||
}
|
||||
|
||||
declare <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8(
|
||||
<vscale x 2 x i8>,
|
||||
<vscale x 2 x i8>,
|
||||
i8,
|
||||
<vscale x 2 x i1>,
|
||||
i32);
|
||||
|
||||
define <vscale x 2 x i8> @intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
|
||||
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8:
|
||||
; CHECK: # %bb.0: # %entry
|
||||
; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
|
||||
; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t
|
||||
; CHECK-NEXT: jalr zero, 0(ra)
|
||||
entry:
|
||||
%a = call <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8(
|
||||
<vscale x 2 x i8> %0,
|
||||
<vscale x 2 x i8> %1,
|
||||
i8 %2,
|
||||
<vscale x 2 x i1> %3,
|
||||
i32 %4)
|
||||
|
||||
ret <vscale x 2 x i8> %a
|
||||
}
|
||||
|
||||
declare <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8(
|
||||
<vscale x 4 x i8>,
|
||||
i8,
|
||||
i32);
|
||||
|
||||
define <vscale x 4 x i8> @intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
|
||||
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8:
|
||||
; CHECK: # %bb.0: # %entry
|
||||
; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
|
||||
; CHECK-NEXT: vslide1down.vx v16, v16, a0
|
||||
; CHECK-NEXT: jalr zero, 0(ra)
|
||||
entry:
|
||||
%a = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8(
|
||||
<vscale x 4 x i8> %0,
|
||||
i8 %1,
|
||||
i32 %2)
|
||||
|
||||
ret <vscale x 4 x i8> %a
|
||||
}
|
||||
|
||||
declare <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8(
|
||||
<vscale x 4 x i8>,
|
||||
<vscale x 4 x i8>,
|
||||
i8,
|
||||
<vscale x 4 x i1>,
|
||||
i32);
|
||||
|
||||
define <vscale x 4 x i8> @intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
|
||||
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8:
|
||||
; CHECK: # %bb.0: # %entry
|
||||
; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
|
||||
; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t
|
||||
; CHECK-NEXT: jalr zero, 0(ra)
|
||||
entry:
|
||||
%a = call <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8(
|
||||
<vscale x 4 x i8> %0,
|
||||
<vscale x 4 x i8> %1,
|
||||
i8 %2,
|
||||
<vscale x 4 x i1> %3,
|
||||
i32 %4)
|
||||
|
||||
ret <vscale x 4 x i8> %a
|
||||
}
|
||||
|
||||
declare <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8(
|
||||
<vscale x 8 x i8>,
|
||||
i8,
|
||||
i32);
|
||||
|
||||
define <vscale x 8 x i8> @intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
|
||||
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8:
|
||||
; CHECK: # %bb.0: # %entry
|
||||
; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
|
||||
; CHECK-NEXT: vslide1down.vx v16, v16, a0
|
||||
; CHECK-NEXT: jalr zero, 0(ra)
|
||||
entry:
|
||||
%a = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8(
|
||||
<vscale x 8 x i8> %0,
|
||||
i8 %1,
|
||||
i32 %2)
|
||||
|
||||
ret <vscale x 8 x i8> %a
|
||||
}
|
||||
|
||||
declare <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8(
|
||||
<vscale x 8 x i8>,
|
||||
<vscale x 8 x i8>,
|
||||
i8,
|
||||
<vscale x 8 x i1>,
|
||||
i32);
|
||||
|
||||
define <vscale x 8 x i8> @intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
|
||||
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8:
|
||||
; CHECK: # %bb.0: # %entry
|
||||
; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
|
||||
; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t
|
||||
; CHECK-NEXT: jalr zero, 0(ra)
|
||||
entry:
|
||||
%a = call <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8(
|
||||
<vscale x 8 x i8> %0,
|
||||
<vscale x 8 x i8> %1,
|
||||
i8 %2,
|
||||
<vscale x 8 x i1> %3,
|
||||
i32 %4)
|
||||
|
||||
ret <vscale x 8 x i8> %a
|
||||
}
|
||||
|
||||
declare <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8(
|
||||
<vscale x 16 x i8>,
|
||||
i8,
|
||||
i32);
|
||||
|
||||
define <vscale x 16 x i8> @intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
|
||||
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8:
|
||||
; CHECK: # %bb.0: # %entry
|
||||
; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
|
||||
; CHECK-NEXT: vslide1down.vx v16, v16, a0
|
||||
; CHECK-NEXT: jalr zero, 0(ra)
|
||||
entry:
|
||||
%a = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8(
|
||||
<vscale x 16 x i8> %0,
|
||||
i8 %1,
|
||||
i32 %2)
|
||||
|
||||
ret <vscale x 16 x i8> %a
|
||||
}
|
||||
|
||||
declare <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8(
|
||||
<vscale x 16 x i8>,
|
||||
<vscale x 16 x i8>,
|
||||
i8,
|
||||
<vscale x 16 x i1>,
|
||||
i32);
|
||||
|
||||
define <vscale x 16 x i8> @intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
|
||||
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8:
|
||||
; CHECK: # %bb.0: # %entry
|
||||
; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
|
||||
; CHECK-NEXT: vslide1down.vx v16, v18, a0, v0.t
|
||||
; CHECK-NEXT: jalr zero, 0(ra)
|
||||
entry:
|
||||
%a = call <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8(
|
||||
<vscale x 16 x i8> %0,
|
||||
<vscale x 16 x i8> %1,
|
||||
i8 %2,
|
||||
<vscale x 16 x i1> %3,
|
||||
i32 %4)
|
||||
|
||||
ret <vscale x 16 x i8> %a
|
||||
}
|
||||
|
||||
declare <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8(
|
||||
<vscale x 32 x i8>,
|
||||
i8,
|
||||
i32);
|
||||
|
||||
define <vscale x 32 x i8> @intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
|
||||
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8:
|
||||
; CHECK: # %bb.0: # %entry
|
||||
; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
|
||||
; CHECK-NEXT: vslide1down.vx v16, v16, a0
|
||||
; CHECK-NEXT: jalr zero, 0(ra)
|
||||
entry:
|
||||
%a = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8(
|
||||
<vscale x 32 x i8> %0,
|
||||
i8 %1,
|
||||
i32 %2)
|
||||
|
||||
ret <vscale x 32 x i8> %a
|
||||
}
|
||||
|
||||
declare <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8(
|
||||
<vscale x 32 x i8>,
|
||||
<vscale x 32 x i8>,
|
||||
i8,
|
||||
<vscale x 32 x i1>,
|
||||
i32);
|
||||
|
||||
define <vscale x 32 x i8> @intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
|
||||
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8:
|
||||
; CHECK: # %bb.0: # %entry
|
||||
; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
|
||||
; CHECK-NEXT: vslide1down.vx v16, v20, a0, v0.t
|
||||
; CHECK-NEXT: jalr zero, 0(ra)
|
||||
entry:
|
||||
%a = call <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8(
|
||||
<vscale x 32 x i8> %0,
|
||||
<vscale x 32 x i8> %1,
|
||||
i8 %2,
|
||||
<vscale x 32 x i1> %3,
|
||||
i32 %4)
|
||||
|
||||
ret <vscale x 32 x i8> %a
|
||||
}
|
||||
|
||||
declare <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8(
|
||||
<vscale x 64 x i8>,
|
||||
i8,
|
||||
i32);
|
||||
|
||||
define <vscale x 64 x i8> @intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
|
||||
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8:
|
||||
; CHECK: # %bb.0: # %entry
|
||||
; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu
|
||||
; CHECK-NEXT: vslide1down.vx v16, v16, a0
|
||||
; CHECK-NEXT: jalr zero, 0(ra)
|
||||
entry:
|
||||
%a = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8(
|
||||
<vscale x 64 x i8> %0,
|
||||
i8 %1,
|
||||
i32 %2)
|
||||
|
||||
ret <vscale x 64 x i8> %a
|
||||
}
|
||||
|
||||
declare <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8(
|
||||
<vscale x 64 x i8>,
|
||||
<vscale x 64 x i8>,
|
||||
i8,
|
||||
<vscale x 64 x i1>,
|
||||
i32);
|
||||
|
||||
define <vscale x 64 x i8> @intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
|
||||
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8:
|
||||
; CHECK: # %bb.0: # %entry
|
||||
; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu
|
||||
; CHECK-NEXT: vle8.v v8, (a0)
|
||||
; CHECK-NEXT: vsetvli a0, a2, e8,m8,ta,mu
|
||||
; CHECK-NEXT: vslide1down.vx v16, v8, a1, v0.t
|
||||
; CHECK-NEXT: jalr zero, 0(ra)
|
||||
entry:
|
||||
%a = call <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8(
|
||||
<vscale x 64 x i8> %0,
|
||||
<vscale x 64 x i8> %1,
|
||||
i8 %2,
|
||||
<vscale x 64 x i1> %3,
|
||||
i32 %4)
|
||||
|
||||
ret <vscale x 64 x i8> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16(
|
||||
<vscale x 1 x i16>,
|
||||
i16,
|
||||
i32);
|
||||
|
||||
define <vscale x 1 x i16> @intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
|
||||
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16:
|
||||
; CHECK: # %bb.0: # %entry
|
||||
; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
|
||||
; CHECK-NEXT: vslide1down.vx v16, v16, a0
|
||||
; CHECK-NEXT: jalr zero, 0(ra)
|
||||
entry:
|
||||
%a = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16(
|
||||
<vscale x 1 x i16> %0,
|
||||
i16 %1,
|
||||
i32 %2)
|
||||
|
||||
ret <vscale x 1 x i16> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16(
|
||||
<vscale x 1 x i16>,
|
||||
<vscale x 1 x i16>,
|
||||
i16,
|
||||
<vscale x 1 x i1>,
|
||||
i32);
|
||||
|
||||
define <vscale x 1 x i16> @intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
|
||||
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16:
|
||||
; CHECK: # %bb.0: # %entry
|
||||
; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
|
||||
; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t
|
||||
; CHECK-NEXT: jalr zero, 0(ra)
|
||||
entry:
|
||||
%a = call <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16(
|
||||
<vscale x 1 x i16> %0,
|
||||
<vscale x 1 x i16> %1,
|
||||
i16 %2,
|
||||
<vscale x 1 x i1> %3,
|
||||
i32 %4)
|
||||
|
||||
ret <vscale x 1 x i16> %a
|
||||
}
|
||||
|
||||
declare <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16(
|
||||
<vscale x 2 x i16>,
|
||||
i16,
|
||||
i32);
|
||||
|
||||
define <vscale x 2 x i16> @intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
|
||||
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16:
|
||||
; CHECK: # %bb.0: # %entry
|
||||
; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
|
||||
; CHECK-NEXT: vslide1down.vx v16, v16, a0
|
||||
; CHECK-NEXT: jalr zero, 0(ra)
|
||||
entry:
|
||||
%a = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16(
|
||||
<vscale x 2 x i16> %0,
|
||||
i16 %1,
|
||||
i32 %2)
|
||||
|
||||
ret <vscale x 2 x i16> %a
|
||||
}
|
||||
|
||||
declare <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16(
|
||||
<vscale x 2 x i16>,
|
||||
<vscale x 2 x i16>,
|
||||
i16,
|
||||
<vscale x 2 x i1>,
|
||||
i32);
|
||||
|
||||
define <vscale x 2 x i16> @intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
|
||||
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16:
|
||||
; CHECK: # %bb.0: # %entry
|
||||
; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
|
||||
; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t
|
||||
; CHECK-NEXT: jalr zero, 0(ra)
|
||||
entry:
|
||||
%a = call <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16(
|
||||
<vscale x 2 x i16> %0,
|
||||
<vscale x 2 x i16> %1,
|
||||
i16 %2,
|
||||
<vscale x 2 x i1> %3,
|
||||
i32 %4)
|
||||
|
||||
ret <vscale x 2 x i16> %a
|
||||
}
|
||||
|
||||
declare <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16(
|
||||
<vscale x 4 x i16>,
|
||||
i16,
|
||||
i32);
|
||||
|
||||
define <vscale x 4 x i16> @intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i16> @intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16(
  <vscale x 8 x i16>,
  i16,
  i32);

define <vscale x 8 x i16> @intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i16> @intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v18, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16(
  <vscale x 16 x i16>,
  i16,
  i32);

define <vscale x 16 x i16> @intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i16> @intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v20, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16(
  <vscale x 32 x i16>,
  i16,
  i32);

define <vscale x 32 x i16> @intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  <vscale x 32 x i1>,
  i32);

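; Note the m8 case below: unlike the smaller LMULs, the second vector argument
; no longer fits in the argument registers next to the v16 register group, so
; it appears to be passed by pointer (a0) and reloaded at VLMAX with vle16.v
; before the masked slide.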
define <vscale x 32 x i16> @intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v8, a1, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i16 %2,
    <vscale x 32 x i1> %3,
    i32 %4)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32(
  <vscale x 1 x i32>,
  i32,
  i32);

define <vscale x 1 x i32> @intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i32> @intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32(
  <vscale x 2 x i32>,
  i32,
  i32);

define <vscale x 2 x i32> @intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i32> @intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32(
  <vscale x 4 x i32>,
  i32,
  i32);

define <vscale x 4 x i32> @intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i32> @intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v18, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32(
  <vscale x 8 x i32>,
  i32,
  i32);

define <vscale x 8 x i32> @intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i32> @intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v20, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32(
  <vscale x 16 x i32>,
  i32,
  i32);

define <vscale x 16 x i32> @intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i32> @intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v8, a1, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i32> %a
}

@ -0,0 +1,978 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
; RUN:   --riscv-no-aliases < %s | FileCheck %s
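; This file mirrors the riscv32 vslide1down tests above for RV64: the vl and
; scalar operands are i64-based here, and element types from i8 up to i64 are
; covered at each legal LMUL, in both unmasked and masked forms.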
declare <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  i64);

define <vscale x 1 x i8> @intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 %1,
    i64 %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i8> @intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8(
  <vscale x 2 x i8>,
  i8,
  i64);

define <vscale x 2 x i8> @intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 %1,
    i64 %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i8> @intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8(
  <vscale x 4 x i8>,
  i8,
  i64);

define <vscale x 4 x i8> @intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 %1,
    i64 %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i8> @intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8(
  <vscale x 8 x i8>,
  i8,
  i64);

define <vscale x 8 x i8> @intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 %1,
    i64 %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i8> @intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8(
  <vscale x 16 x i8>,
  i8,
  i64);

define <vscale x 16 x i8> @intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 %1,
    i64 %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i8> @intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v18, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8(
  <vscale x 32 x i8>,
  i8,
  i64);

define <vscale x 32 x i8> @intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 %1,
    i64 %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  i64);

define <vscale x 32 x i8> @intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v20, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8(
  <vscale x 64 x i8>,
  i8,
  i64);

define <vscale x 64 x i8> @intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    i8 %1,
    i64 %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  <vscale x 64 x i1>,
  i64);

define <vscale x 64 x i8> @intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vsetvli a0, a2, e8,m8,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v8, a1, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i8 %2,
    <vscale x 64 x i1> %3,
    i64 %4)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16(
  <vscale x 1 x i16>,
  i16,
  i64);

define <vscale x 1 x i16> @intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 %1,
    i64 %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i16> @intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16(
  <vscale x 2 x i16>,
  i16,
  i64);

define <vscale x 2 x i16> @intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 %1,
    i64 %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i16> @intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16(
  <vscale x 4 x i16>,
  i16,
  i64);

define <vscale x 4 x i16> @intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 %1,
    i64 %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i16> @intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16(
  <vscale x 8 x i16>,
  i16,
  i64);

define <vscale x 8 x i16> @intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 %1,
    i64 %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i16> @intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v18, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16(
  <vscale x 16 x i16>,
  i16,
  i64);

define <vscale x 16 x i16> @intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 %1,
    i64 %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i16> @intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v20, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16(
  <vscale x 32 x i16>,
  i16,
  i64);

define <vscale x 32 x i16> @intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    i16 %1,
    i64 %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  <vscale x 32 x i1>,
  i64);

define <vscale x 32 x i16> @intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v8, a1, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i16 %2,
    <vscale x 32 x i1> %3,
    i64 %4)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32(
  <vscale x 1 x i32>,
  i32,
  i64);

define <vscale x 1 x i32> @intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 %1,
    i64 %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i32> @intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32(
  <vscale x 2 x i32>,
  i32,
  i64);

define <vscale x 2 x i32> @intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 %1,
    i64 %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i32> @intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32(
  <vscale x 4 x i32>,
  i32,
  i64);

define <vscale x 4 x i32> @intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 %1,
    i64 %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i32> @intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v18, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32(
  <vscale x 8 x i32>,
  i32,
  i64);

define <vscale x 8 x i32> @intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 %1,
    i64 %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i32> @intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v20, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32(
  <vscale x 16 x i32>,
  i32,
  i64);

define <vscale x 16 x i32> @intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    i32 %1,
    i64 %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  <vscale x 16 x i1>,
  i64);

define <vscale x 16 x i32> @intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v8, a1, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    i64 %4)

  ret <vscale x 16 x i32> %a
}

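; On RV64 the i64 element type can use the .vx form directly because the
; scalar fits in a single GPR; the riscv32 run above stops at i32, presumably
; because an i64 scalar would need a register pair there.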
declare <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  i64);

define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i64> @intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v17, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64(
  <vscale x 2 x i64>,
  i64,
  i64);

define <vscale x 2 x i64> @intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i64> @intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v18, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64(
  <vscale x 4 x i64>,
  i64,
  i64);

define <vscale x 4 x i64> @intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i64> @intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v20, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64(
  <vscale x 8 x i64>,
  i64,
  i64);

define <vscale x 8 x i64> @intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v16, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i64> @intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vsetvli a0, a2, e64,m8,ta,mu
; CHECK-NEXT: vslide1down.vx v16, v8, a1, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i64> %a
}

@ -0,0 +1,24 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
; RUN:   --riscv-no-aliases < %s | FileCheck %s
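; Unlike vslide1down, the destination of vslide1up may not overlap its source,
; so the result is produced in a scratch register (v25 below) and copied back
; with vmv1r.v.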
declare <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  i32);

define <vscale x 1 x i8> @intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
; CHECK-NEXT: vslide1up.vx v25, v16, a0
; CHECK-NEXT: vmv1r.v v16, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 1 x i8> %a
}