[RISCV] Add more nxvi64 vector intrinsic tests for RV32. NFC

This confirms we handle most instructions gracefully. We do
currently fail for vslide1up and vslide1down, though.
Craig Topper 2021-04-01 20:20:15 -07:00
parent d441dee5c2
commit 5a9a8c7cd4
99 changed files with 35237 additions and 31 deletions
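
For context, the interesting part of these RV32 tests is how a 64-bit scalar operand is handled when XLEN is only 32 bits: the i64 value arrives split across two GPRs, so the expected code splats both halves and recombines them into 64-bit vector elements, turning the vector-scalar form into a vector-vector one. A minimal sketch of that sequence, lifted from the vdiv.vx checks below (register assignments match those tests; the comments are added here for explanation):

    # a0 = low half, a1 = high half of the i64 scalar, a2 = vl (RV32 ABI)
    vsetvli a2, a2, e64,m1,ta,mu
    vmv.v.x v25, a1        # splat the high half
    addi    a1, zero, 32
    vsll.vx v25, v25, a1   # move the high half into bits 63:32
    vmv.v.x v26, a0        # splat the low half
    vsll.vx v26, v26, a1
    vsrl.vx v26, v26, a1   # zero-extend the low half
    vor.vv  v25, v26, v25  # full 64-bit splat of the scalar
    vdiv.vv v8, v8, v25    # .vx intrinsic lowered to a .vv instruction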

View File

@@ -397,6 +397,94 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vcompress_vm_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vcompress.vm v8, v9, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vcompress_vm_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vcompress.vm v8, v10, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vcompress_vm_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vcompress.vm v8, v12, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vcompress_vm_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
; CHECK-NEXT: vcompress.vm v8, v16, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,

View File

@@ -829,5 +829,6 @@ entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i1> %1,
i64 %2)
ret <vscale x 1 x i8> %a
}

View File

@@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vdiv.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vdiv.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vdiv.vv v8, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vdiv.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vdiv.vv v8, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vdiv.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i64> @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vdiv.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -1587,3 +1764,249 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i64> @intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vdiv.vv v8, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vdiv.vv v8, v9, v25, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i64> @intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vdiv.vv v8, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vdiv.vv v8, v10, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i64> @intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vdiv.vv v8, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vdiv.vv v8, v12, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
i32);
define <vscale x 8 x i64> @intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vdiv.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrrs a3, vlenb, zero
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v24, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v0, v24, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v24, v24, v0
; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl1re8.v v0, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}

View File

@@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vdivu.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vdivu.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vdivu.vv v8, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vdivu.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vdivu.vv v8, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vdivu.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i64> @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vdivu.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -1587,3 +1764,249 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i64> @intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vdivu.vv v8, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vdivu.vv v8, v9, v25, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i64> @intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vdivu.vv v8, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vdivu.vv v8, v10, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i64> @intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vdivu.vv v8, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vdivu.vv v8, v12, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
i32);
define <vscale x 8 x i64> @intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vdivu.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vdivu_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrrs a3, vlenb, zero
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v24, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v0, v24, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v24, v24, v0
; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl1re8.v v0, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}

View File

@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
<vscale x 1 x half>,
@@ -506,3 +506,187 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1i64(
<vscale x 1 x double>,
i32);
define <vscale x 1 x i64> @intrinsic_vfclass_v_nxv1i64_nxv1f64(
; CHECK-LABEL: intrinsic_vfclass_v_nxv1i64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vfclass.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 1 x double> %0,
i32 %1) nounwind {
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1i64(
<vscale x 1 x double> %0,
i32 %1)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x double>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vfclass_mask_v_nxv1i64_nxv1f64(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vfclass.v v8, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 1 x i64> %0,
<vscale x 1 x double> %1,
<vscale x 1 x i1> %2,
i32 %3) nounwind {
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x double> %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2i64(
<vscale x 2 x double>,
i32);
define <vscale x 2 x i64> @intrinsic_vfclass_v_nxv2i64_nxv2f64(
; CHECK-LABEL: intrinsic_vfclass_v_nxv2i64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vfclass.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 2 x double> %0,
i32 %1) nounwind {
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2i64(
<vscale x 2 x double> %0,
i32 %1)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x double>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vfclass_mask_v_nxv2i64_nxv2f64(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vfclass.v v8, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 2 x i64> %0,
<vscale x 2 x double> %1,
<vscale x 2 x i1> %2,
i32 %3) nounwind {
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x double> %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4i64(
<vscale x 4 x double>,
i32);
define <vscale x 4 x i64> @intrinsic_vfclass_v_nxv4i64_nxv4f64(
; CHECK-LABEL: intrinsic_vfclass_v_nxv4i64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vfclass.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 4 x double> %0,
i32 %1) nounwind {
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4i64(
<vscale x 4 x double> %0,
i32 %1)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x double>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vfclass_mask_v_nxv4i64_nxv4f64(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vfclass.v v8, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 4 x i64> %0,
<vscale x 4 x double> %1,
<vscale x 4 x i1> %2,
i32 %3) nounwind {
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x double> %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8i64(
<vscale x 8 x double>,
i32);
define <vscale x 8 x i64> @intrinsic_vfclass_v_nxv8i64_nxv8f64(
; CHECK-LABEL: intrinsic_vfclass_v_nxv8i64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vfclass.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 8 x double> %0,
i32 %1) nounwind {
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8i64(
<vscale x 8 x double> %0,
i32 %1)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x double>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vfclass_mask_v_nxv8i64_nxv8f64(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
; CHECK-NEXT: vfclass.v v8, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
<vscale x 8 x i64> %0,
<vscale x 8 x double> %1,
<vscale x 8 x i1> %2,
i32 %3) nounwind {
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x double> %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}

View File

@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
<vscale x 1 x i16>,
@@ -440,3 +440,163 @@ entry:
ret <vscale x 16 x float> %a
}
declare <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64(
<vscale x 1 x i64>,
i32);
define <vscale x 1 x double> @intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vfcvt.f.x.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64(
<vscale x 1 x i64> %0,
i32 %1)
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64(
<vscale x 1 x double>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x double> @intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64(
<vscale x 1 x double> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64(
<vscale x 2 x i64>,
i32);
define <vscale x 2 x double> @intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vfcvt.f.x.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64(
<vscale x 2 x i64> %0,
i32 %1)
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64(
<vscale x 2 x double>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x double> @intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vfcvt.f.x.v v8, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64(
<vscale x 2 x double> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64(
<vscale x 4 x i64>,
i32);
define <vscale x 4 x double> @intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vfcvt.f.x.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64(
<vscale x 4 x i64> %0,
i32 %1)
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64(
<vscale x 4 x double>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x double> @intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vfcvt.f.x.v v8, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64(
<vscale x 4 x double> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64(
<vscale x 8 x i64>,
i32);
define <vscale x 8 x double> @intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vfcvt.f.x.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64(
<vscale x 8 x i64> %0,
i32 %1)
ret <vscale x 8 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64(
<vscale x 8 x double>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x double> @intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
; CHECK-NEXT: vfcvt.f.x.v v8, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64(
<vscale x 8 x double> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x double> %a
}

View File

@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
<vscale x 1 x i16>,
@@ -440,3 +440,163 @@ entry:
ret <vscale x 16 x float> %a
}
declare <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64(
<vscale x 1 x i64>,
i32);
define <vscale x 1 x double> @intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vfcvt.f.xu.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64(
<vscale x 1 x i64> %0,
i32 %1)
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64(
<vscale x 1 x double>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64(
<vscale x 1 x double> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64(
<vscale x 2 x i64>,
i32);
define <vscale x 2 x double> @intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vfcvt.f.xu.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64(
<vscale x 2 x i64> %0,
i32 %1)
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64(
<vscale x 2 x double>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vfcvt.f.xu.v v8, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64(
<vscale x 2 x double> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64(
<vscale x 4 x i64>,
i32);
define <vscale x 4 x double> @intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vfcvt.f.xu.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64(
<vscale x 4 x i64> %0,
i32 %1)
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64(
<vscale x 4 x double>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vfcvt.f.xu.v v8, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64(
<vscale x 4 x double> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64(
<vscale x 8 x i64>,
i32);
define <vscale x 8 x double> @intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vfcvt.f.xu.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64(
<vscale x 8 x i64> %0,
i32 %1)
ret <vscale x 8 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64(
<vscale x 8 x double>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x double> @intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
; CHECK-NEXT: vfcvt.f.xu.v v8, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64(
<vscale x 8 x double> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x double> %a
}

View File

@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
<vscale x 1 x half>,
@@ -440,3 +440,163 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64(
<vscale x 1 x double>,
i32);
define <vscale x 1 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64(<vscale x 1 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64(
<vscale x 1 x double> %0,
i32 %1)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64(
<vscale x 1 x i64>,
<vscale x 1 x double>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64(<vscale x 1 x i64> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv1i64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64(
<vscale x 1 x i64> %0,
<vscale x 1 x double> %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64(
<vscale x 2 x double>,
i32);
define <vscale x 2 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64(<vscale x 2 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64(
<vscale x 2 x double> %0,
i32 %1)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64(
<vscale x 2 x i64>,
<vscale x 2 x double>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64(<vscale x 2 x i64> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv2i64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64(
<vscale x 2 x i64> %0,
<vscale x 2 x double> %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64(
<vscale x 4 x double>,
i32);
define <vscale x 4 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64(<vscale x 4 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64(
<vscale x 4 x double> %0,
i32 %1)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64(
<vscale x 4 x i64>,
<vscale x 4 x double>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64(<vscale x 4 x i64> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv4i64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64(
<vscale x 4 x i64> %0,
<vscale x 4 x double> %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64(
<vscale x 8 x double>,
i32);
define <vscale x 8 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64(<vscale x 8 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64(
<vscale x 8 x double> %0,
i32 %1)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64(
<vscale x 8 x i64>,
<vscale x 8 x double>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64(<vscale x 8 x i64> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.x.f.v_nxv8i64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64(
<vscale x 8 x i64> %0,
<vscale x 8 x double> %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}

View File

@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
<vscale x 1 x half>,
@@ -440,3 +440,163 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64(
<vscale x 1 x double>,
i32);
define <vscale x 1 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64(<vscale x 1 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64(
<vscale x 1 x double> %0,
i32 %1)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64(
<vscale x 1 x i64>,
<vscale x 1 x double>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64(<vscale x 1 x i64> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64(
<vscale x 1 x i64> %0,
<vscale x 1 x double> %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64(
<vscale x 2 x double>,
i32);
define <vscale x 2 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64(<vscale x 2 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64(
<vscale x 2 x double> %0,
i32 %1)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64(
<vscale x 2 x i64>,
<vscale x 2 x double>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64(<vscale x 2 x i64> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64(
<vscale x 2 x i64> %0,
<vscale x 2 x double> %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64(
<vscale x 4 x double>,
i32);
define <vscale x 4 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64(<vscale x 4 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64(
<vscale x 4 x double> %0,
i32 %1)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64(
<vscale x 4 x i64>,
<vscale x 4 x double>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64(<vscale x 4 x i64> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64(
<vscale x 4 x i64> %0,
<vscale x 4 x double> %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64(
<vscale x 8 x double>,
i32);
define <vscale x 8 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64(<vscale x 8 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64(
<vscale x 8 x double> %0,
i32 %1)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64(
<vscale x 8 x i64>,
<vscale x 8 x double>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64(<vscale x 8 x i64> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64(
<vscale x 8 x i64> %0,
<vscale x 8 x double> %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}

View File

@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
<vscale x 1 x half>,
@@ -440,3 +440,163 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64(
<vscale x 1 x double>,
i32);
define <vscale x 1 x i64> @intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64(<vscale x 1 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vfcvt.x.f.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64(
<vscale x 1 x double> %0,
i32 %1)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64(
<vscale x 1 x i64>,
<vscale x 1 x double>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64(<vscale x 1 x i64> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64(
<vscale x 1 x i64> %0,
<vscale x 1 x double> %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64(
<vscale x 2 x double>,
i32);
define <vscale x 2 x i64> @intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64(<vscale x 2 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vfcvt.x.f.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64(
<vscale x 2 x double> %0,
i32 %1)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64(
<vscale x 2 x i64>,
<vscale x 2 x double>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64(<vscale x 2 x i64> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64(
<vscale x 2 x i64> %0,
<vscale x 2 x double> %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64(
<vscale x 4 x double>,
i32);
define <vscale x 4 x i64> @intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64(<vscale x 4 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vfcvt.x.f.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64(
<vscale x 4 x double> %0,
i32 %1)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64(
<vscale x 4 x i64>,
<vscale x 4 x double>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64(<vscale x 4 x i64> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vfcvt.x.f.v v8, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64(
<vscale x 4 x i64> %0,
<vscale x 4 x double> %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64(
<vscale x 8 x double>,
i32);
define <vscale x 8 x i64> @intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64(<vscale x 8 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vfcvt.x.f.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64(
<vscale x 8 x double> %0,
i32 %1)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64(
<vscale x 8 x i64>,
<vscale x 8 x double>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64(<vscale x 8 x i64> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64(
<vscale x 8 x i64> %0,
<vscale x 8 x double> %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}


@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
<vscale x 1 x half>,
@@ -440,3 +440,163 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64(
<vscale x 1 x double>,
i32);
define <vscale x 1 x i64> @intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64(<vscale x 1 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vfcvt.xu.f.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64(
<vscale x 1 x double> %0,
i32 %1)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64(
<vscale x 1 x i64>,
<vscale x 1 x double>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64(<vscale x 1 x i64> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64(
<vscale x 1 x i64> %0,
<vscale x 1 x double> %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64(
<vscale x 2 x double>,
i32);
define <vscale x 2 x i64> @intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64(<vscale x 2 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vfcvt.xu.f.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64(
<vscale x 2 x double> %0,
i32 %1)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64(
<vscale x 2 x i64>,
<vscale x 2 x double>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64(<vscale x 2 x i64> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vfcvt.xu.f.v v8, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64(
<vscale x 2 x i64> %0,
<vscale x 2 x double> %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64(
<vscale x 4 x double>,
i32);
define <vscale x 4 x i64> @intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64(<vscale x 4 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vfcvt.xu.f.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64(
<vscale x 4 x double> %0,
i32 %1)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64(
<vscale x 4 x i64>,
<vscale x 4 x double>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64(<vscale x 4 x i64> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vfcvt.xu.f.v v8, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64(
<vscale x 4 x i64> %0,
<vscale x 4 x double> %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64(
<vscale x 8 x double>,
i32);
define <vscale x 8 x i64> @intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64(<vscale x 8 x double> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vfcvt.xu.f.v v8, v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64(
<vscale x 8 x double> %0,
i32 %1)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64(
<vscale x 8 x i64>,
<vscale x 8 x double>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64(<vscale x 8 x i64> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
; CHECK-NEXT: vfcvt.xu.f.v v8, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64(
<vscale x 8 x i64> %0,
<vscale x 8 x double> %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}


@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
<vscale x 1 x i32>,
@@ -205,3 +205,167 @@ entry:
ret <vscale x 16 x half> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64(
<vscale x 1 x i64>,
i32);
define <vscale x 1 x float> @intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vfncvt.f.x.w v25, v8
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64(
<vscale x 1 x i64> %0,
i32 %1)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64(
<vscale x 1 x float>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x float> @intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfncvt.f.x.w v8, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64(
<vscale x 1 x float> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64(
<vscale x 2 x i64>,
i32);
define <vscale x 2 x float> @intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vfncvt.f.x.w v25, v8
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64(
<vscale x 2 x i64> %0,
i32 %1)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64(
<vscale x 2 x float>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x float> @intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vfncvt.f.x.w v8, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64(
<vscale x 2 x float> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64(
<vscale x 4 x i64>,
i32);
define <vscale x 4 x float> @intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vfncvt.f.x.w v26, v8
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64(
<vscale x 4 x i64> %0,
i32 %1)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64(
<vscale x 4 x float>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x float> @intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vfncvt.f.x.w v8, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64(
<vscale x 4 x float> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64(
<vscale x 8 x i64>,
i32);
define <vscale x 8 x float> @intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vfncvt.f.x.w v28, v8
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64(
<vscale x 8 x i64> %0,
i32 %1)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64(
<vscale x 8 x float>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x float> @intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vfncvt.f.x.w v8, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64(
<vscale x 8 x float> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x float> %a
}


@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
<vscale x 1 x i32>,
@@ -205,3 +205,167 @@ entry:
ret <vscale x 16 x half> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64(
<vscale x 1 x i64>,
i32);
define <vscale x 1 x float> @intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vfncvt.f.xu.w v25, v8
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64(
<vscale x 1 x i64> %0,
i32 %1)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64(
<vscale x 1 x float>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfncvt.f.xu.w v8, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64(
<vscale x 1 x float> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64(
<vscale x 2 x i64>,
i32);
define <vscale x 2 x float> @intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vfncvt.f.xu.w v25, v8
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64(
<vscale x 2 x i64> %0,
i32 %1)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64(
<vscale x 2 x float>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vfncvt.f.xu.w v8, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64(
<vscale x 2 x float> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64(
<vscale x 4 x i64>,
i32);
define <vscale x 4 x float> @intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vfncvt.f.xu.w v26, v8
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64(
<vscale x 4 x i64> %0,
i32 %1)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64(
<vscale x 4 x float>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vfncvt.f.xu.w v8, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64(
<vscale x 4 x float> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64(
<vscale x 8 x i64>,
i32);
define <vscale x 8 x float> @intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vfncvt.f.xu.w v28, v8
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64(
<vscale x 8 x i64> %0,
i32 %1)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64(
<vscale x 8 x float>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vfncvt.f.xu.w v8, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64(
<vscale x 8 x float> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x float> %a
}


@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
<vscale x 1 x half>,
@@ -205,3 +205,167 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32(
<vscale x 1 x float>,
i32);
define <vscale x 1 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vfwcvt.rtz.x.f.v v25, v8
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32(
<vscale x 1 x float> %0,
i32 %1)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32(
<vscale x 1 x i64>,
<vscale x 1 x float>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32(<vscale x 1 x i64> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv1i64_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32(
<vscale x 1 x i64> %0,
<vscale x 1 x float> %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32(
<vscale x 2 x float>,
i32);
define <vscale x 2 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vfwcvt.rtz.x.f.v v26, v8
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32(
<vscale x 2 x float> %0,
i32 %1)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32(
<vscale x 2 x i64>,
<vscale x 2 x float>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32(<vscale x 2 x i64> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv2i64_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32(
<vscale x 2 x i64> %0,
<vscale x 2 x float> %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32(
<vscale x 4 x float>,
i32);
define <vscale x 4 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vfwcvt.rtz.x.f.v v28, v8
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32(
<vscale x 4 x float> %0,
i32 %1)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32(
<vscale x 4 x i64>,
<vscale x 4 x float>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32(<vscale x 4 x i64> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv4i64_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32(
<vscale x 4 x i64> %0,
<vscale x 4 x float> %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32(
<vscale x 8 x float>,
i32);
define <vscale x 8 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vfwcvt.rtz.x.f.v v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32(
<vscale x 8 x float> %0,
i32 %1)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32(
<vscale x 8 x i64>,
<vscale x 8 x float>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.x.f.v_nxv8i64_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32(
<vscale x 8 x i64> %0,
<vscale x 8 x float> %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}


@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
<vscale x 1 x half>,
@@ -205,3 +205,167 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32(
<vscale x 1 x float>,
i32);
define <vscale x 1 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vfwcvt.rtz.xu.f.v v25, v8
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32(
<vscale x 1 x float> %0,
i32 %1)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32(
<vscale x 1 x i64>,
<vscale x 1 x float>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x i64> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv1i64_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32(
<vscale x 1 x i64> %0,
<vscale x 1 x float> %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32(
<vscale x 2 x float>,
i32);
define <vscale x 2 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vfwcvt.rtz.xu.f.v v26, v8
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32(
<vscale x 2 x float> %0,
i32 %1)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32(
<vscale x 2 x i64>,
<vscale x 2 x float>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x i64> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv2i64_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32(
<vscale x 2 x i64> %0,
<vscale x 2 x float> %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32(
<vscale x 4 x float>,
i32);
define <vscale x 4 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vfwcvt.rtz.xu.f.v v28, v8
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32(
<vscale x 4 x float> %0,
i32 %1)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32(
<vscale x 4 x i64>,
<vscale x 4 x float>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x i64> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv4i64_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32(
<vscale x 4 x i64> %0,
<vscale x 4 x float> %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32(
<vscale x 8 x float>,
i32);
define <vscale x 8 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vfwcvt.rtz.xu.f.v v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32(
<vscale x 8 x float> %0,
i32 %1)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32(
<vscale x 8 x i64>,
<vscale x 8 x float>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_rtz.xu.f.v_nxv8i64_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32(
<vscale x 8 x i64> %0,
<vscale x 8 x float> %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}


@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
<vscale x 1 x half>,
@@ -205,3 +205,167 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32(
<vscale x 1 x float>,
i32);
define <vscale x 1 x i64> @intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vfwcvt.x.f.v v25, v8
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32(
<vscale x 1 x float> %0,
i32 %1)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32(
<vscale x 1 x i64>,
<vscale x 1 x float>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32(<vscale x 1 x i64> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfwcvt.x.f.v v8, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32(
<vscale x 1 x i64> %0,
<vscale x 1 x float> %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32(
<vscale x 2 x float>,
i32);
define <vscale x 2 x i64> @intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vfwcvt.x.f.v v26, v8
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32(
<vscale x 2 x float> %0,
i32 %1)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32(
<vscale x 2 x i64>,
<vscale x 2 x float>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32(<vscale x 2 x i64> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vfwcvt.x.f.v v8, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32(
<vscale x 2 x i64> %0,
<vscale x 2 x float> %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32(
<vscale x 4 x float>,
i32);
define <vscale x 4 x i64> @intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vfwcvt.x.f.v v28, v8
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32(
<vscale x 4 x float> %0,
i32 %1)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32(
<vscale x 4 x i64>,
<vscale x 4 x float>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32(<vscale x 4 x i64> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vfwcvt.x.f.v v8, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32(
<vscale x 4 x i64> %0,
<vscale x 4 x float> %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32(
<vscale x 8 x float>,
i32);
define <vscale x 8 x i64> @intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vfwcvt.x.f.v v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32(
<vscale x 8 x float> %0,
i32 %1)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32(
<vscale x 8 x i64>,
<vscale x 8 x float>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vfwcvt.x.f.v v8, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32(
<vscale x 8 x i64> %0,
<vscale x 8 x float> %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}


@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
<vscale x 1 x half>,
@@ -205,3 +205,167 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32(
<vscale x 1 x float>,
i32);
define <vscale x 1 x i64> @intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vfwcvt.xu.f.v v25, v8
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32(
<vscale x 1 x float> %0,
i32 %1)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32(
<vscale x 1 x i64>,
<vscale x 1 x float>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x i64> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32(
<vscale x 1 x i64> %0,
<vscale x 1 x float> %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32(
<vscale x 2 x float>,
i32);
define <vscale x 2 x i64> @intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vfwcvt.xu.f.v v26, v8
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32(
<vscale x 2 x float> %0,
i32 %1)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32(
<vscale x 2 x i64>,
<vscale x 2 x float>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x i64> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vfwcvt.xu.f.v v8, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32(
<vscale x 2 x i64> %0,
<vscale x 2 x float> %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32(
<vscale x 4 x float>,
i32);
define <vscale x 4 x i64> @intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vfwcvt.xu.f.v v28, v8
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32(
<vscale x 4 x float> %0,
i32 %1)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32(
<vscale x 4 x i64>,
<vscale x 4 x float>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x i64> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vfwcvt.xu.f.v v8, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32(
<vscale x 4 x i64> %0,
<vscale x 4 x float> %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32(
<vscale x 8 x float>,
i32);
define <vscale x 8 x i64> @intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x float> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vfwcvt.xu.f.v v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32(
<vscale x 8 x float> %0,
i32 %1)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32(
<vscale x 8 x i64>,
<vscale x 8 x float>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x i64> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vfwcvt.xu.f.v v8, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32(
<vscale x 8 x i64> %0,
<vscale x 8 x float> %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}


@@ -612,3 +612,147 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64(
i32);
define <vscale x 1 x i64> @intrinsic_vid_v_nxv1i64(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vid.v v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64(
i32 %0)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vid_mask_v_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vid.v v8, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i1> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64(
i32);
define <vscale x 2 x i64> @intrinsic_vid_v_nxv2i64(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vid.v v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64(
i32 %0)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vid_mask_v_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vid.v v8, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i1> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64(
i32);
define <vscale x 4 x i64> @intrinsic_vid_v_nxv4i64(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vid.v v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64(
i32 %0)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vid_mask_v_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vid.v v8, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i1> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(
i32);
define <vscale x 8 x i64> @intrinsic_vid_v_nxv8i64(i32 %0) nounwind {
; CHECK-LABEL: intrinsic_vid_v_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vid.v v8
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(
i32 %0)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vid_mask_v_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
; CHECK-NEXT: vid.v v8, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i1> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}


@@ -720,3 +720,163 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64(
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_viota_m_nxv1i64_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv1i64_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: viota.m v8, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64(
<vscale x 1 x i1> %0,
i32 %1)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i1>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_viota_mask_m_nxv1i64_nxv1i1(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i64_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: viota.m v8, v0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i1> %1,
<vscale x 1 x i1> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64(
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_viota_m_nxv2i64_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv2i64_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: viota.m v8, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64(
<vscale x 2 x i1> %0,
i32 %1)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i1>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_viota_mask_m_nxv2i64_nxv2i1(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i64_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: viota.m v8, v0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i1> %1,
<vscale x 2 x i1> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64(
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_viota_m_nxv4i64_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv4i64_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: viota.m v8, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64(
<vscale x 4 x i1> %0,
i32 %1)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i1>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_viota_mask_m_nxv4i64_nxv4i1(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i64_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: viota.m v8, v0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i1> %1,
<vscale x 4 x i1> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64(
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_viota_m_nxv8i64_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_viota_m_nxv8i64_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: viota.m v8, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64(
<vscale x 8 x i1> %0,
i32 %1)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i1>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_viota_mask_m_nxv8i64_nxv8i1(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i64_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
; CHECK-NEXT: viota.m v8, v0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i1> %1,
<vscale x 8 x i1> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}


@@ -1,7 +1,327 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -mattr=+experimental-zfh \
; RUN: -mattr=+f -verify-machineinstrs \
; RUN: -mattr=+d -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
<vscale x 1 x i64>*,
i32);
define <vscale x 1 x i64> @intrinsic_vle_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
<vscale x 1 x i64>* %0,
i32 %1)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>*,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vle_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64>* %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64(
<vscale x 2 x i64>*,
i32);
define <vscale x 2 x i64> @intrinsic_vle_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64(
<vscale x 2 x i64>* %0,
i32 %1)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>*,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vle_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64>* %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64(
<vscale x 4 x i64>*,
i32);
define <vscale x 4 x i64> @intrinsic_vle_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64(
<vscale x 4 x i64>* %0,
i32 %1)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>*,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vle_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64>* %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64(
<vscale x 8 x i64>*,
i32);
define <vscale x 8 x i64> @intrinsic_vle_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64(
<vscale x 8 x i64>* %0,
i32 %1)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>*,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vle_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64>* %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x double> @llvm.riscv.vle.nxv1f64(
<vscale x 1 x double>*,
i32);
define <vscale x 1 x double> @intrinsic_vle_v_nxv1f64_nxv1f64(<vscale x 1 x double>* %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vle.nxv1f64(
<vscale x 1 x double>* %0,
i32 %1)
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>*,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x double> @intrinsic_vle_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64(
<vscale x 1 x double> %0,
<vscale x 1 x double>* %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vle.nxv2f64(
<vscale x 2 x double>*,
i32);
define <vscale x 2 x double> @intrinsic_vle_v_nxv2f64_nxv2f64(<vscale x 2 x double>* %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vle.nxv2f64(
<vscale x 2 x double>* %0,
i32 %1)
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>*,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x double> @intrinsic_vle_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64(
<vscale x 2 x double> %0,
<vscale x 2 x double>* %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vle.nxv4f64(
<vscale x 4 x double>*,
i32);
define <vscale x 4 x double> @intrinsic_vle_v_nxv4f64_nxv4f64(<vscale x 4 x double>* %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vle.nxv4f64(
<vscale x 4 x double>* %0,
i32 %1)
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>*,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x double> @intrinsic_vle_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64(
<vscale x 4 x double> %0,
<vscale x 4 x double>* %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vle.nxv8f64(
<vscale x 8 x double>*,
i32);
define <vscale x 8 x double> @intrinsic_vle_v_nxv8f64_nxv8f64(<vscale x 8 x double>* %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vle.nxv8f64(
<vscale x 8 x double>* %0,
i32 %1)
ret <vscale x 8 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64(
<vscale x 8 x double>,
<vscale x 8 x double>*,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x double> @intrinsic_vle_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64(
<vscale x 8 x double> %0,
<vscale x 8 x double>* %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x double> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
<vscale x 1 x i32>*,
i32);


@ -1,6 +1,202 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh,+f,+d -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.nxv1i64(
<vscale x 1 x i64>*,
i32);
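; The vleff intrinsics return { data, new vl }. The checks below expect the
; fault-only-first load followed by a csrr of vl, whose value is then stored
; to the i32* argument.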
define <vscale x 1 x i64> @intrinsic_vleff_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, i32 %1, i32* %2) nounwind {
; CHECK-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
; CHECK-NEXT: vle64ff.v v8, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
; CHECK-NEXT: ret
entry:
%a = call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.nxv1i64(
<vscale x 1 x i64>* %0,
i32 %1)
%b = extractvalue { <vscale x 1 x i64>, i32 } %a, 0
%c = extractvalue { <vscale x 1 x i64>, i32 } %a, 1
store i32 %c, i32* %2
ret <vscale x 1 x i64> %b
}
declare { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>*,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, i32 %3, i32* %4) nounwind {
; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
; CHECK-NEXT: ret
entry:
%a = call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64>* %1,
<vscale x 1 x i1> %2,
i32 %3)
%b = extractvalue { <vscale x 1 x i64>, i32 } %a, 0
%c = extractvalue { <vscale x 1 x i64>, i32 } %a, 1
store i32 %c, i32* %4
ret <vscale x 1 x i64> %b
}
declare { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.nxv2i64(
<vscale x 2 x i64>*,
i32);
define <vscale x 2 x i64> @intrinsic_vleff_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, i32 %1, i32* %2) nounwind {
; CHECK-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
; CHECK-NEXT: vle64ff.v v8, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
; CHECK-NEXT: ret
entry:
%a = call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.nxv2i64(
<vscale x 2 x i64>* %0,
i32 %1)
%b = extractvalue { <vscale x 2 x i64>, i32 } %a, 0
%c = extractvalue { <vscale x 2 x i64>, i32 } %a, 1
store i32 %c, i32* %2
ret <vscale x 2 x i64> %b
}
declare { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>*,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vleff_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, i32 %3, i32* %4) nounwind {
; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
; CHECK-NEXT: ret
entry:
%a = call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64>* %1,
<vscale x 2 x i1> %2,
i32 %3)
%b = extractvalue { <vscale x 2 x i64>, i32 } %a, 0
%c = extractvalue { <vscale x 2 x i64>, i32 } %a, 1
store i32 %c, i32* %4
ret <vscale x 2 x i64> %b
}
declare { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.nxv4i64(
<vscale x 4 x i64>*,
i32);
define <vscale x 4 x i64> @intrinsic_vleff_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, i32 %1, i32* %2) nounwind {
; CHECK-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
; CHECK-NEXT: vle64ff.v v8, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
; CHECK-NEXT: ret
entry:
%a = call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.nxv4i64(
<vscale x 4 x i64>* %0,
i32 %1)
%b = extractvalue { <vscale x 4 x i64>, i32 } %a, 0
%c = extractvalue { <vscale x 4 x i64>, i32 } %a, 1
store i32 %c, i32* %2
ret <vscale x 4 x i64> %b
}
declare { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>*,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vleff_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, i32 %3, i32* %4) nounwind {
; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
; CHECK-NEXT: ret
entry:
%a = call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64>* %1,
<vscale x 4 x i1> %2,
i32 %3)
%b = extractvalue { <vscale x 4 x i64>, i32 } %a, 0
%c = extractvalue { <vscale x 4 x i64>, i32 } %a, 1
store i32 %c, i32* %4
ret <vscale x 4 x i64> %b
}
declare { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.nxv8i64(
<vscale x 8 x i64>*,
i32);
define <vscale x 8 x i64> @intrinsic_vleff_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, i32 %1, i32* %2) nounwind {
; CHECK-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
; CHECK-NEXT: vle64ff.v v8, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
; CHECK-NEXT: ret
entry:
%a = call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.nxv8i64(
<vscale x 8 x i64>* %0,
i32 %1)
%b = extractvalue { <vscale x 8 x i64>, i32 } %a, 0
%c = extractvalue { <vscale x 8 x i64>, i32 } %a, 1
store i32 %c, i32* %2
ret <vscale x 8 x i64> %b
}
declare { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>*,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vleff_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, i32 %3, i32* %4) nounwind {
; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
; CHECK-NEXT: vle64ff.v v8, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
; CHECK-NEXT: ret
entry:
%a = call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64>* %1,
<vscale x 8 x i1> %2,
i32 %3)
%b = extractvalue { <vscale x 8 x i64>, i32 } %a, 0
%c = extractvalue { <vscale x 8 x i64>, i32 } %a, 1
store i32 %c, i32* %4
ret <vscale x 8 x i64> %b
}
declare { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.nxv1f64(
<vscale x 1 x double>*,
i32);

File diff suppressed because it is too large.


@ -1,6 +1,358 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
<vscale x 1 x i64>*,
i32,
i32);
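; Strided loads: the second scalar argument is the byte stride, which the
; checks expect to be passed straight through in a1 to vlse64.v.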
define <vscale x 1 x i64> @intrinsic_vlse_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vlse64.v v8, (a0), a1
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
<vscale x 1 x i64>* %0,
i32 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>*,
i32,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,tu,mu
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64>* %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64(
<vscale x 2 x i64>*,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vlse_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vlse64.v v8, (a0), a1
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64(
<vscale x 2 x i64>* %0,
i32 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>*,
i32,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vlse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,tu,mu
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64>* %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64(
<vscale x 4 x i64>*,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vlse_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vlse64.v v8, (a0), a1
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64(
<vscale x 4 x i64>* %0,
i32 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>*,
i32,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vlse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,tu,mu
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64>* %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64(
<vscale x 8 x i64>*,
i32,
i32);
define <vscale x 8 x i64> @intrinsic_vlse_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vlse64.v v8, (a0), a1
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64(
<vscale x 8 x i64>* %0,
i32 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>*,
i32,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vlse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,tu,mu
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64>* %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x double> @llvm.riscv.vlse.nxv1f64(
<vscale x 1 x double>*,
i32,
i32);
define <vscale x 1 x double> @intrinsic_vlse_v_nxv1f64_nxv1f64(<vscale x 1 x double>* %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vlse64.v v8, (a0), a1
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vlse.nxv1f64(
<vscale x 1 x double>* %0,
i32 %1,
i32 %2)
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>*,
i32,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x double> @intrinsic_vlse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,tu,mu
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64(
<vscale x 1 x double> %0,
<vscale x 1 x double>* %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vlse.nxv2f64(
<vscale x 2 x double>*,
i32,
i32);
define <vscale x 2 x double> @intrinsic_vlse_v_nxv2f64_nxv2f64(<vscale x 2 x double>* %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vlse64.v v8, (a0), a1
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vlse.nxv2f64(
<vscale x 2 x double>* %0,
i32 %1,
i32 %2)
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>*,
i32,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x double> @intrinsic_vlse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,tu,mu
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64(
<vscale x 2 x double> %0,
<vscale x 2 x double>* %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vlse.nxv4f64(
<vscale x 4 x double>*,
i32,
i32);
define <vscale x 4 x double> @intrinsic_vlse_v_nxv4f64_nxv4f64(<vscale x 4 x double>* %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vlse64.v v8, (a0), a1
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vlse.nxv4f64(
<vscale x 4 x double>* %0,
i32 %1,
i32 %2)
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>*,
i32,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x double> @intrinsic_vlse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,tu,mu
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64(
<vscale x 4 x double> %0,
<vscale x 4 x double>* %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vlse.nxv8f64(
<vscale x 8 x double>*,
i32,
i32);
define <vscale x 8 x double> @intrinsic_vlse_v_nxv8f64_nxv8f64(<vscale x 8 x double>* %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vlse64.v v8, (a0), a1
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vlse.nxv8f64(
<vscale x 8 x double>* %0,
i32 %1,
i32 %2)
ret <vscale x 8 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64(
<vscale x 8 x double>,
<vscale x 8 x double>*,
i32,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x double> @intrinsic_vlse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,tu,mu
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64(
<vscale x 8 x double> %0,
<vscale x 8 x double>* %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x double> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
<vscale x 1 x i32>*,
i32,

File diff suppressed because it is too large.


@ -691,6 +691,144 @@ entry:
ret <vscale x 8 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
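; vmacc is a tied multiply-accumulate (vd[i] += vs1[i] * vs2[i]), so %0 is both
; the accumulator and the result; the checks use a tail-undisturbed vsetvli.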
define <vscale x 1 x i64> @intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vmacc.vv v8, v9, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vmacc.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vmacc.vv v8, v10, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vmacc.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vmacc.vv v8, v12, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vmacc.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@ -1380,3 +1518,189 @@ entry:
ret <vscale x 8 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
i32);
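; RV32 has no 64-bit GPRs, so the i64 scalar operand arrives as two 32-bit
; halves. The expected code splats the high half, shifts it into place,
; zero-extends a splat of the low half, ORs the two together, and then falls
; back to the .vv form of the instruction.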
define <vscale x 1 x i64> @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vmacc.vv v8, v25, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vmacc.vv v8, v25, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vmacc.vv v8, v26, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vmacc.vv v8, v26, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vmacc.vv v8, v28, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vmacc.vv v8, v28, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}


@ -361,6 +361,86 @@ entry:
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
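; vmadc produces a mask of the carry-outs of a vector add; it has no masked
; form, so the result mask is written directly to v0 in the checks below.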
define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmadc.vv v0, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmadc.vv v0, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmadc.vv v0, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vmadc.vv v0, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@ -721,6 +801,114 @@ entry:
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vmadc.vv v0, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vmadc.vv v0, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vmadc.vv v0, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
i32);
define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vmadc.vv v0, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 8 x i1> %a
}
define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
@ -990,3 +1178,63 @@ entry:
ret <vscale x 16 x i1> %a
}
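; The .vi tests use the immediates 9 and -9, which fit in the 5-bit signed
; immediate field, so vmadc.vi is selected with no scalar materialization.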
define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmadc.vi v0, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 1 x i1> %a
}
define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmadc.vi v0, v8, -9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 -9,
i32 %1)
ret <vscale x 2 x i1> %a
}
define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmadc.vi v0, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 4 x i1> %a
}
define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vmadc.vi v0, v8, -9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 -9,
i32 %1)
ret <vscale x 8 x i1> %a
}


@ -415,6 +415,98 @@ entry:
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
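; vmadc.vvm consumes a carry-in mask in v0, so the carry-out cannot be written
; there directly; it is computed into a scratch register (v25) and copied to v0
; with vmv1r.v to form the return value.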
define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmadc.vvm v25, v8, v9, v0
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmadc.vvm v25, v8, v10, v0
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmadc.vvm v25, v8, v12, v0
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vmadc.vvm v25, v8, v16, v0
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@ -829,6 +921,126 @@ entry:
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v26, v26, v25
; CHECK-NEXT: vmadc.vvm v25, v8, v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vmadc.vvm v25, v8, v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vmadc.vvm v25, v8, v28, v0
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vmadc.vvm v25, v8, v16, v0
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i1> %a
}
define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
@ -1134,3 +1346,71 @@ entry:
ret <vscale x 16 x i1> %a
}
define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmadc.vim v25, v8, 9, v0
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 9,
<vscale x 1 x i1> %1,
i32 %2)
ret <vscale x 1 x i1> %a
}
define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmadc.vim v25, v8, 9, v0
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 9,
<vscale x 2 x i1> %1,
i32 %2)
ret <vscale x 2 x i1> %a
}
define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmadc.vim v25, v8, 9, v0
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 9,
<vscale x 4 x i1> %1,
i32 %2)
ret <vscale x 4 x i1> %a
}
define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vmadc.vim v25, v8, 9, v0
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 9,
<vscale x 8 x i1> %1,
i32 %2)
ret <vscale x 8 x i1> %a
}


@ -691,6 +691,144 @@ entry:
ret <vscale x 8 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
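; vmadd is the overwrite-multiplicand form: vd[i] = (vs1[i] * vd[i]) + vs2[i].
; As with vmacc, %0 is the tied destination, hence the tail-undisturbed policy.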
define <vscale x 1 x i64> @intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vmadd.vv v8, v9, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vmadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vmadd.vv v8, v10, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vmadd.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vmadd.vv v8, v12, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vmadd.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@ -1380,3 +1518,189 @@ entry:
ret <vscale x 8 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vmadd.vv v8, v25, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vmadd.vv v8, v25, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vmadd.vv v8, v26, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vmadd.vv v8, v26, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vmadd.vv v8, v28, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vmadd.vv v8, v28, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}

@@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vmax.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmax.vv v8, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vmax.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmax.vv v8, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vmax.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i64> @intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vmax.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -1587,3 +1764,249 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i64> @intrinsic_vmax_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vmax.vv v8, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vmax.vv v8, v9, v25, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i64> @intrinsic_vmax_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vmax.vv v8, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vmax.vv v8, v10, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i64> @intrinsic_vmax_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vmax.vv v8, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vmax.vv v8, v12, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
i32);
define <vscale x 8 x i64> @intrinsic_vmax_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmax_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vmax.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmax_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrrs a3, vlenb, zero
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v24, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v0, v24, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v24, v24, v0
; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl1re8.v v0, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}

@@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmaxu.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vmaxu.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmaxu.vv v8, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vmaxu.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmaxu.vv v8, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vmaxu.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i64> @intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vmaxu.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -1587,3 +1764,249 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i64> @intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vmaxu.vv v8, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vmaxu.vv v8, v9, v25, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i64> @intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vmaxu.vv v8, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vmaxu.vv v8, v10, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i64> @intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vmaxu.vv v8, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vmaxu.vv v8, v12, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
i32);
define <vscale x 8 x i64> @intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vmaxu.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmaxu_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrrs a3, vlenb, zero
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v24, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v0, v24, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v24, v24, v0
; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl1re8.v v0, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}

@@ -397,6 +397,94 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vmerge_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vmerge_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vmerge_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vmerge_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -793,6 +881,122 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vmerge_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vxm_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vmerge.vvm v8, v8, v25, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vmerge_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vxm_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vmerge.vvm v8, v8, v26, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vmerge_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vxm_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vmerge.vvm v8, v8, v28, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}
define <vscale x 1 x i8> @intrinsic_vmerge_vim_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
@@ -1080,3 +1284,67 @@ entry:
ret <vscale x 16 x i32> %a
}
define <vscale x 1 x i64> @intrinsic_vmerge_vim_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmerge.vim v8, v8, 9, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 9,
<vscale x 1 x i1> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vmerge_vim_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmerge.vim v8, v8, 9, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 9,
<vscale x 2 x i1> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vmerge_vim_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmerge.vim v8, v8, 9, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 9,
<vscale x 4 x i1> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
define <vscale x 8 x i64> @intrinsic_vmerge_vim_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmerge_vim_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vmerge.vim v8, v8, 9, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 9,
<vscale x 8 x i1> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}

View File

@@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmin.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vmin.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmin.vv v8, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vmin.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmin.vv v8, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vmin.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i64> @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vmin.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -1587,3 +1764,249 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i64> @intrinsic_vmin_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vmin.vv v8, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vmin.vv v8, v9, v25, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i64> @intrinsic_vmin_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vmin.vv v8, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vmin.vv v8, v10, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i64> @intrinsic_vmin_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vmin.vv v8, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vmin.vv v8, v12, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
i32);
define <vscale x 8 x i64> @intrinsic_vmin_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmin_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vmin.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrrs a3, vlenb, zero
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v24, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v0, v24, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v24, v24, v0
; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl1re8.v v0, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}

@@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vminu.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vminu.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vminu.vv v8, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vminu.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vminu.vv v8, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vminu.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i64> @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vminu.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -1587,3 +1764,249 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i64> @intrinsic_vminu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vminu.vv v8, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vminu.vv v8, v9, v25, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i64> @intrinsic_vminu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vminu.vv v8, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vminu.vv v8, v10, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i64> @intrinsic_vminu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vminu.vv v8, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vminu.vv v8, v12, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
i32);
define <vscale x 8 x i64> @intrinsic_vminu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vminu_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vminu.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vminu_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrrs a3, vlenb, zero
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v24, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v0, v24, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v24, v24, v0
; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl1re8.v v0, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
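; Note: at LMUL=8 there are not enough free m8 register groups for the splat
; (v8 and v16 hold the vector operands), so the mask is spilled out of v0 with
; vs1r.v, v0 is reused as a temporary while the splat is assembled, and the
; mask is reloaded into v0 just before the masked vminu.vv.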


@ -361,6 +361,86 @@ entry:
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i1> @intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmsbc.vv v0, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i1> @intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmsbc.vv v0, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i1> @intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmsbc.vv v0, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i1> @intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vmsbc.vv v0, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@ -720,3 +800,111 @@ entry:
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vmsbc.vv v0, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vmsbc.vv v0, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vmsbc.vv v0, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
i32);
define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vmsbc.vv v0, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 8 x i1> %a
}


@ -415,6 +415,98 @@ entry:
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmsbc.vvm v25, v8, v9, v0
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i1> %a
}
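; Note: vmsbc.vvm reads the borrow-in mask from v0, so the result is computed
; into a scratch mask register (v25) and copied to v0 with vmv1r.v rather than
; written to v0 in place, which would overlap the borrow-in operand. The plain
; vmsbc.vv tests above have no mask operand and target v0 directly.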
declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmsbc.vvm v25, v8, v10, v0
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmsbc.vvm v25, v8, v12, v0
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vmsbc.vvm v25, v8, v16, v0
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@ -828,3 +920,123 @@ entry:
ret <vscale x 16 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v26, v26, v25
; CHECK-NEXT: vmsbc.vvm v25, v8, v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vmsbc.vvm v25, v8, v26, v0
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vmsbc.vvm v25, v8, v28, v0
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i1> %a
}
declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vmsbc.vvm v25, v8, v16, v0
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i1> %a
}


@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
<vscale x 1 x i8>,
@ -781,6 +781,162 @@ entry:
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmseq.vv v0, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(
<vscale x 1 x i1>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu
; CHECK-NEXT: vmseq.vv v0, v8, v9
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vmseq.vv v25, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i32 %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i32 %4)
ret <vscale x 1 x i1> %a
}
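; Note: the mask_vv tests generate their own mask with an unmasked vmseq under
; a ta policy, then switch to tu and issue the masked vmseq into v25, which was
; loaded with the incoming v0 (the maskedoff operand) before the first compare
; clobbered v0.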
declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmseq.vv v0, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64(
<vscale x 2 x i1>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu
; CHECK-NEXT: vmseq.vv v0, v8, v10
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vmseq.vv v25, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
i32 %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i32 %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmseq.vv v0, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64(
<vscale x 4 x i1>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu
; CHECK-NEXT: vmseq.vv v0, v8, v12
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vmseq.vv v25, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
i32 %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i32 %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@ -1486,6 +1642,192 @@ entry:
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vmseq.vv v0, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
<vscale x 1 x i1>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v27, a0
; CHECK-NEXT: vsll.vx v27, v27, a1
; CHECK-NEXT: vsrl.vx v27, v27, a1
; CHECK-NEXT: vor.vv v26, v27, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmseq.vv v25, v8, v26, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i1> %a
}
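; Note: for the masked vx form the maskedoff operand arrives in v0 and the
; controlling mask in v9, so once the splat is built the mask is moved into v0
; (vmv1r.v v0, v9) for the v0.t-masked compare, and the result in v25 is moved
; back to v0 to be returned.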
declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vmseq.vv v0, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
<vscale x 2 x i1>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmseq.vv v25, v8, v26, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vmseq.vv v0, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
<vscale x 4 x i1>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmseq.vv v25, v8, v28, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i1> %a
}
define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
@ -2010,3 +2352,108 @@ entry:
ret <vscale x 8 x i1> %a
}
define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmseq.vi v0, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 1 x i1> %a
}
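; Note: when the i64 operand is a small immediate (9 in these tests) no 32-bit
; splat sequence is required; the compare is emitted directly as vmseq.vi, so
; the immediate forms need no RV32-specific handling.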
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i1> %a
}
define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmseq.vi v0, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 2 x i1> %a
}
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i1> %a
}
define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmseq.vi v0, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 4 x i1> %a
}
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmseq.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i1> %a
}


@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
<vscale x 1 x i8>,


@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
<vscale x 1 x i8>,
@ -781,6 +781,162 @@ entry:
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmsle.vv v0, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64(
<vscale x 1 x i1>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu
; CHECK-NEXT: vmsle.vv v0, v8, v9
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vmsle.vv v25, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i32 %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i32 %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmsle.vv v0, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64(
<vscale x 2 x i1>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu
; CHECK-NEXT: vmsle.vv v0, v8, v10
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vmsle.vv v25, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
i32 %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i32 %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmsle.vv v0, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64(
<vscale x 4 x i1>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu
; CHECK-NEXT: vmsle.vv v0, v8, v12
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vmsle.vv v25, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
i32 %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i32 %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@ -1486,6 +1642,192 @@ entry:
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vmsle.vv v0, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
<vscale x 1 x i1>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v27, a0
; CHECK-NEXT: vsll.vx v27, v27, a1
; CHECK-NEXT: vsrl.vx v27, v27, a1
; CHECK-NEXT: vor.vv v26, v27, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsle.vv v25, v8, v26, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vmsle.vv v0, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
<vscale x 2 x i1>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsle.vv v25, v8, v26, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vmsle.vv v0, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
<vscale x 4 x i1>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsle.vv v25, v8, v28, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i1> %a
}
define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
@ -2010,3 +2352,108 @@ entry:
ret <vscale x 8 x i1> %a
}
define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmsle.vi v0, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 1 x i1> %a
}
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsle.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i1> %a
}
define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmsle.vi v0, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 2 x i1> %a
}
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsle.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i1> %a
}
define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmsle.vi v0, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 4 x i1> %a
}
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsle.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i1> %a
}


@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
<vscale x 1 x i8>,


@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
<vscale x 1 x i8>,
@ -781,6 +781,162 @@ entry:
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmsleu.vv v0, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64(
<vscale x 1 x i1>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu
; CHECK-NEXT: vmsleu.vv v0, v8, v9
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vmsleu.vv v25, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i32 %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i32 %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmsleu.vv v0, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64(
<vscale x 2 x i1>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu
; CHECK-NEXT: vmsleu.vv v0, v8, v10
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vmsleu.vv v25, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
i32 %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i32 %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmsleu.vv v0, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64(
<vscale x 4 x i1>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu
; CHECK-NEXT: vmsleu.vv v0, v8, v12
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vmsleu.vv v25, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
i32 %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i32 %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@ -1486,6 +1642,192 @@ entry:
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vmsleu.vv v0, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
<vscale x 1 x i1>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v27, a0
; CHECK-NEXT: vsll.vx v27, v27, a1
; CHECK-NEXT: vsrl.vx v27, v27, a1
; CHECK-NEXT: vor.vv v26, v27, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsleu.vv v25, v8, v26, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vmsleu.vv v0, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
<vscale x 2 x i1>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsleu.vv v25, v8, v26, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vmsleu.vv v0, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
<vscale x 4 x i1>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsleu.vv v25, v8, v28, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i1> %a
}
define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
@ -2010,3 +2352,108 @@ entry:
ret <vscale x 8 x i1> %a
}
define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmsleu.vi v0, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 1 x i1> %a
}
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i1> %a
}
define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmsleu.vi v0, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 2 x i1> %a
}
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i1> %a
}
define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmsleu.vi v0, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 4 x i1> %a
}
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsleu.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i1> %a
}

View File

@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
<vscale x 1 x i8>,

View File

@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
<vscale x 1 x i8>,

View File

@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
<vscale x 1 x i8>,

View File

@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
<vscale x 1 x i8>,
@ -781,6 +781,162 @@ entry:
ret <vscale x 8 x i1> %a
}
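; The masked vmsltu.vv tests below first materialize the %mask operand with an
; unmasked compare under a tail-agnostic vsetvli, then switch to a
; tail-undisturbed vsetvli for the masked compare that produces the result.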
declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmsltu.vv v0, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64(
<vscale x 1 x i1>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu
; CHECK-NEXT: vmsltu.vv v0, v8, v9
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vmsltu.vv v25, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i32 %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i32 %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmsltu.vv v0, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64(
<vscale x 2 x i1>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu
; CHECK-NEXT: vmsltu.vv v0, v8, v10
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vmsltu.vv v25, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
i32 %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i32 %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmsltu.vv v0, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64(
<vscale x 4 x i1>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu
; CHECK-NEXT: vmsltu.vv v0, v8, v12
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vmsltu.vv v25, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
i32 %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i32 %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@ -1486,6 +1642,192 @@ entry:
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vmsltu.vv v0, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
<vscale x 1 x i1>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v27, a0
; CHECK-NEXT: vsll.vx v27, v27, a1
; CHECK-NEXT: vsrl.vx v27, v27, a1
; CHECK-NEXT: vor.vv v26, v27, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsltu.vv v25, v8, v26, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vmsltu.vv v0, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
<vscale x 2 x i1>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsltu.vv v25, v8, v26, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vmsltu.vv v0, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
<vscale x 4 x i1>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsltu.vv v25, v8, v28, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i1> %a
}
define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
@ -2010,3 +2352,108 @@ entry:
ret <vscale x 8 x i1> %a
}
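; vmsltu has no immediate form, so the expected output for these tests is
; vmsleu.vi with the immediate decremented by one (e.g. an unsigned less-than
; against 14 is checked as vmsleu.vi ..., 13).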
define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmsleu.vi v0, v8, 13
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 14,
i32 %1)
ret <vscale x 1 x i1> %a
}
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsleu.vi v25, v8, 14, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 15,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i1> %a
}
define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmsleu.vi v0, v8, 15
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 16,
i32 %1)
ret <vscale x 2 x i1> %a
}
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsleu.vi v25, v8, -16, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
i64 -15,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i1> %a
}
define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmsleu.vi v0, v8, -15
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 -14,
i32 %1)
ret <vscale x 4 x i1> %a
}
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsleu.vi v25, v8, -14, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
i64 -13,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i1> %a
}

View File

@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
<vscale x 1 x i8>,

View File

@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
<vscale x 1 x i8>,
@ -781,6 +781,162 @@ entry:
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmsne.vv v0, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(
<vscale x 1 x i1>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu
; CHECK-NEXT: vmsne.vv v0, v8, v9
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vmsne.vv v25, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i32 %4)
%a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i32 %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmsne.vv v0, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64(
<vscale x 2 x i1>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu
; CHECK-NEXT: vmsne.vv v0, v8, v10
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vmsne.vv v25, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
i32 %4)
%a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i32 %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmsne.vv v0, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64(
<vscale x 4 x i1>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu
; CHECK-NEXT: vmsne.vv v0, v8, v12
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vmsne.vv v25, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
i32 %4)
%a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i32 %4)
ret <vscale x 4 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@ -1486,6 +1642,192 @@ entry:
ret <vscale x 8 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vmsne.vv v0, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
<vscale x 1 x i1>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v27, a0
; CHECK-NEXT: vsll.vx v27, v27, a1
; CHECK-NEXT: vsrl.vx v27, v27, a1
; CHECK-NEXT: vor.vv v26, v27, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vv v25, v8, v26, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vmsne.vv v0, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i1> %a
}
declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
<vscale x 2 x i1>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsne.vv v25, v8, v26, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vmsne.vv v0, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i1> %a
}
declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
<vscale x 4 x i1>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsne.vv v25, v8, v28, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i1> %a
}
define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
@ -2010,3 +2352,108 @@ entry:
ret <vscale x 8 x i1> %a
}
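; Unlike the vmsltu cases above, vmsne does have a .vi form, so the immediate 9
; is expected unchanged at every LMUL.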
define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmsne.vi v0, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 1 x i1> %a
}
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i1> %a
}
define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmsne.vi v0, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 2 x i1> %a
}
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
<vscale x 2 x i1> %0,
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i1> %a
}
define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmsne.vi v0, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 4 x i1> %a
}
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v25, v0
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsne.vi v25, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
<vscale x 4 x i1> %0,
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i1> %a
}

View File

@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
<vscale x 1 x i8>,

View File

@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vmul_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmul.vv v8, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vmul_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vmul.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vmul_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vmul.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i64> @intrinsic_vmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
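; With three m8 operands the third no longer fits in the vector argument
; registers, so the expected code below reloads it from the pointer in a0 with
; vl8re64.v before the masked multiply.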
define <vscale x 8 x i64> @intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@ -1587,3 +1764,249 @@ entry:
ret <vscale x 16 x i32> %a
}
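; For e64 .vx operations on RV32 the scalar cannot be passed in a single GPR,
; so the tests below expect the splat-and-or sequence followed by vmul.vv rather
; than a single vmul.vx instruction.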
declare <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i64> @intrinsic_vmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vmul.vv v8, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vmul.vv v8, v9, v25, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i64> @intrinsic_vmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vmul.vv v8, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vmul.vv v8, v10, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i64> @intrinsic_vmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vmul.vv v8, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vmul.vv v8, v12, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
i32);
define <vscale x 8 x i64> @intrinsic_vmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
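; Building the 64-bit splat at m8 needs two m8 register groups; with v8 and v16
; holding the source operands, the expected code spills the v0 mask to the
; stack, uses v0 and v24 as scratch for the splat, and reloads the mask just
; before the masked vmul.vv.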
define <vscale x 8 x i64> @intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrrs a3, vlenb, zero
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v24, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v0, v24, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v24, v24, v0
; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl1re8.v v0, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}

View File

@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vmulh_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmulh.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vmulh_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vmulh_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmulh.vv v8, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vmulh_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vmulh.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vmulh_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmulh.vv v8, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vmulh_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vmulh.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i64> @intrinsic_vmulh_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vmulh.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@ -1587,3 +1764,249 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i64> @intrinsic_vmulh_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vmulh.vv v8, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vmulh_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vmulh.vv v8, v9, v25, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i64> @intrinsic_vmulh_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vmulh.vv v8, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vmulh_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vmulh.vv v8, v10, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i64> @intrinsic_vmulh_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vmulh.vv v8, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vmulh_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vmulh.vv v8, v12, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
i32);
define <vscale x 8 x i64> @intrinsic_vmulh_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vmulh.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vmulh_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrrs a3, vlenb, zero
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v24, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v0, v24, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v24, v24, v0
; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl1re8.v v0, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}

View File

@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vmulhsu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmulhsu.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vmulhsu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vmulhsu.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vmulhsu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmulhsu.vv v8, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vmulhsu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vmulhsu.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vmulhsu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmulhsu.vv v8, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vmulhsu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vmulhsu.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i64> @intrinsic_vmulhsu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vmulhsu.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -1587,3 +1764,249 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i64> @intrinsic_vmulhsu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vmulhsu.vv v8, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vmulhsu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vmulhsu.vv v8, v9, v25, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i64> @intrinsic_vmulhsu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vmulhsu.vv v8, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vmulhsu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vmulhsu.vv v8, v10, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i64> @intrinsic_vmulhsu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vmulhsu.vv v8, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vmulhsu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vmulhsu.vv v8, v12, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
i32);
define <vscale x 8 x i64> @intrinsic_vmulhsu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vmulhsu.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vmulhsu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhsu_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrrs a3, vlenb, zero
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v24, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v0, v24, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v24, v24, v0
; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl1re8.v v0, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}

@@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vmulhu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vmulhu.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vmulhu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vmulhu.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vmulhu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vmulhu.vv v8, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vmulhu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vmulhu.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vmulhu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vmulhu.vv v8, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vmulhu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vmulhu.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i64> @intrinsic_vmulhu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vmulhu.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -1587,3 +1764,249 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i64> @intrinsic_vmulhu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vmulhu.vv v8, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vmulhu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vmulhu.vv v8, v9, v25, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i64> @intrinsic_vmulhu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vmulhu.vv v8, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vmulhu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vmulhu.vv v8, v10, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i64> @intrinsic_vmulhu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vmulhu.vv v8, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vmulhu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vmulhu.vv v8, v12, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
i32);
define <vscale x 8 x i64> @intrinsic_vmulhu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vmulhu.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vmulhu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmulhu_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrrs a3, vlenb, zero
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v24, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v0, v24, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v24, v24, v0
; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl1re8.v v0, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}

@@ -496,6 +496,186 @@ entry:
ret <vscale x 16 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i32);
define <vscale x 1 x i32> @intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vnclip.wv v25, v8, v9
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vnclip.wv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i32);
define <vscale x 2 x i32> @intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vnclip.wv v25, v8, v10
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32(
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vnclip.wv v8, v10, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i32);
define <vscale x 4 x i32> @intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vnclip.wv v26, v8, v12
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32(
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vnclip.wv v8, v12, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i32);
define <vscale x 8 x i32> @intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vnclip.wv v28, v8, v16
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32(
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vnclip.wv v8, v16, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
<vscale x 1 x i16>,
i32,
@@ -991,6 +1171,186 @@ entry:
ret <vscale x 16 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
<vscale x 1 x i64>,
i32,
i32);
define <vscale x 1 x i32> @intrinsic_vnclip_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
; CHECK-NEXT: vnclip.wx v25, v8, a0
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
<vscale x 1 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
<vscale x 1 x i32>,
<vscale x 1 x i64>,
i32,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
; CHECK-NEXT: vnclip.wx v8, v9, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
<vscale x 1 x i32> %0,
<vscale x 1 x i64> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
<vscale x 2 x i64>,
i32,
i32);
define <vscale x 2 x i32> @intrinsic_vnclip_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
; CHECK-NEXT: vnclip.wx v25, v8, a0
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
<vscale x 2 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
<vscale x 2 x i32>,
<vscale x 2 x i64>,
i32,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
; CHECK-NEXT: vnclip.wx v8, v10, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
<vscale x 2 x i32> %0,
<vscale x 2 x i64> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
<vscale x 4 x i64>,
i32,
i32);
define <vscale x 4 x i32> @intrinsic_vnclip_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
; CHECK-NEXT: vnclip.wx v26, v8, a0
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
<vscale x 4 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
<vscale x 4 x i32>,
<vscale x 4 x i64>,
i32,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
; CHECK-NEXT: vnclip.wx v8, v12, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
<vscale x 4 x i32> %0,
<vscale x 4 x i64> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
<vscale x 8 x i64>,
i32,
i32);
define <vscale x 8 x i32> @intrinsic_vnclip_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
; CHECK-NEXT: vnclip.wx v28, v8, a0
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
<vscale x 8 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
<vscale x 8 x i32>,
<vscale x 8 x i64>,
i32,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
; CHECK-NEXT: vnclip.wx v8, v16, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
<vscale x 8 x i32> %0,
<vscale x 8 x i64> %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
define <vscale x 1 x i8> @intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8:
; CHECK: # %bb.0: # %entry
@@ -1353,3 +1713,135 @@ entry:
ret <vscale x 16 x i16> %a
}
define <vscale x 1 x i32> @intrinsic_vnclip_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i32_nxv1i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vnclip.wi v25, v8, 9
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
<vscale x 1 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 1 x i32> %a
}
define <vscale x 1 x i32> @intrinsic_vnclip_mask_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i32_nxv1i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vnclip.wi v8, v9, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
<vscale x 1 x i32> %0,
<vscale x 1 x i64> %1,
i32 9,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i32> %a
}
define <vscale x 2 x i32> @intrinsic_vnclip_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i32_nxv2i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vnclip.wi v25, v8, 9
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
<vscale x 2 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 2 x i32> %a
}
define <vscale x 2 x i32> @intrinsic_vnclip_mask_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i32_nxv2i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vnclip.wi v8, v10, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
<vscale x 2 x i32> %0,
<vscale x 2 x i64> %1,
i32 9,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i32> %a
}
define <vscale x 4 x i32> @intrinsic_vnclip_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i32_nxv4i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vnclip.wi v26, v8, 9
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
<vscale x 4 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 4 x i32> %a
}
define <vscale x 4 x i32> @intrinsic_vnclip_mask_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i32_nxv4i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vnclip.wi v8, v12, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
<vscale x 4 x i32> %0,
<vscale x 4 x i64> %1,
i32 9,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i32> %a
}
define <vscale x 8 x i32> @intrinsic_vnclip_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i32_nxv8i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vnclip.wi v28, v8, 9
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
<vscale x 8 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 8 x i32> %a
}
define <vscale x 8 x i32> @intrinsic_vnclip_mask_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i32_nxv8i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vnclip.wi v8, v16, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
<vscale x 8 x i32> %0,
<vscale x 8 x i64> %1,
i32 9,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i32> %a
}

@@ -496,6 +496,186 @@ entry:
ret <vscale x 16 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i32);
define <vscale x 1 x i32> @intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vnclipu.wv v25, v8, v9
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vnclipu.wv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i32);
define <vscale x 2 x i32> @intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vnclipu.wv v25, v8, v10
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32(
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vnclipu.wv v8, v10, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i32);
define <vscale x 4 x i32> @intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vnclipu.wv v26, v8, v12
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32(
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vnclipu.wv v8, v12, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i32);
define <vscale x 8 x i32> @intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vnclipu.wv v28, v8, v16
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32(
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vnclipu.wv v8, v16, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
<vscale x 1 x i16>,
i32,
@@ -991,6 +1171,186 @@ entry:
ret <vscale x 16 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
<vscale x 1 x i64>,
i32,
i32);
define <vscale x 1 x i32> @intrinsic_vnclipu_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
; CHECK-NEXT: vnclipu.wx v25, v8, a0
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
<vscale x 1 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
<vscale x 1 x i32>,
<vscale x 1 x i64>,
i32,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
; CHECK-NEXT: vnclipu.wx v8, v9, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
<vscale x 1 x i32> %0,
<vscale x 1 x i64> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
<vscale x 2 x i64>,
i32,
i32);
define <vscale x 2 x i32> @intrinsic_vnclipu_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
; CHECK-NEXT: vnclipu.wx v25, v8, a0
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
<vscale x 2 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
<vscale x 2 x i32>,
<vscale x 2 x i64>,
i32,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
; CHECK-NEXT: vnclipu.wx v8, v10, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
<vscale x 2 x i32> %0,
<vscale x 2 x i64> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
<vscale x 4 x i64>,
i32,
i32);
define <vscale x 4 x i32> @intrinsic_vnclipu_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
; CHECK-NEXT: vnclipu.wx v26, v8, a0
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
<vscale x 4 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
<vscale x 4 x i32>,
<vscale x 4 x i64>,
i32,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
; CHECK-NEXT: vnclipu.wx v8, v12, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
<vscale x 4 x i32> %0,
<vscale x 4 x i64> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
<vscale x 8 x i64>,
i32,
i32);
define <vscale x 8 x i32> @intrinsic_vnclipu_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
; CHECK-NEXT: vnclipu.wx v28, v8, a0
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
<vscale x 8 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
<vscale x 8 x i32>,
<vscale x 8 x i64>,
i32,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
; CHECK-NEXT: vnclipu.wx v8, v16, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
<vscale x 8 x i32> %0,
<vscale x 8 x i64> %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
define <vscale x 1 x i8> @intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8:
; CHECK: # %bb.0: # %entry
@@ -1353,3 +1713,135 @@ entry:
ret <vscale x 16 x i16> %a
}
define <vscale x 1 x i32> @intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vnclipu.wi v25, v8, 9
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
<vscale x 1 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 1 x i32> %a
}
define <vscale x 1 x i32> @intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vnclipu.wi v8, v9, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
<vscale x 1 x i32> %0,
<vscale x 1 x i64> %1,
i32 9,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i32> %a
}
define <vscale x 2 x i32> @intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vnclipu.wi v25, v8, 9
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
<vscale x 2 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 2 x i32> %a
}
define <vscale x 2 x i32> @intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vnclipu.wi v8, v10, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
<vscale x 2 x i32> %0,
<vscale x 2 x i64> %1,
i32 9,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i32> %a
}
define <vscale x 4 x i32> @intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vnclipu.wi v26, v8, 9
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
<vscale x 4 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 4 x i32> %a
}
define <vscale x 4 x i32> @intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vnclipu.wi v8, v12, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
<vscale x 4 x i32> %0,
<vscale x 4 x i64> %1,
i32 9,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i32> %a
}
define <vscale x 8 x i32> @intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vnclipu.wi v28, v8, 9
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
<vscale x 8 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 8 x i32> %a
}
define <vscale x 8 x i32> @intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vnclipu.wi v8, v16, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
<vscale x 8 x i32> %0,
<vscale x 8 x i64> %1,
i32 9,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i32> %a
}

@@ -691,6 +691,144 @@ entry:
ret <vscale x 8 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vnmsac.vv v8, v9, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vnmsac.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vnmsac.vv v8, v10, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vnmsac.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vnmsac.vv v8, v12, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vnmsac.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -1380,3 +1518,189 @@ entry:
ret <vscale x 8 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vnmsac.vv v8, v25, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vnmsac.vv v8, v25, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vnmsac.vv v8, v26, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vnmsac.vv v8, v26, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vnmsac.vv v8, v28, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vnmsac.vv v8, v28, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}

View File

@@ -691,6 +691,144 @@ entry:
ret <vscale x 8 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vnmsub.vv v8, v9, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vnmsub.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vnmsub.vv v8, v10, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vnmsub.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vnmsub.vv v8, v12, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vnmsub.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -1380,3 +1518,189 @@ entry:
ret <vscale x 8 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vnmsub.vv v8, v25, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vnmsub.vv v8, v25, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vnmsub.vv v8, v26, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vnmsub.vv v8, v26, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vnmsub.vv v8, v28, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vnmsub.vv v8, v28, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}

View File

@@ -496,6 +496,186 @@ entry:
ret <vscale x 16 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i32);
define <vscale x 1 x i32> @intrinsic_vnsra_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vnsra.wv v25, v8, v9
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vnsra_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vnsra.wv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i32);
define <vscale x 2 x i32> @intrinsic_vnsra_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vnsra.wv v25, v8, v10
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32(
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vnsra_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vnsra.wv v8, v10, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i32);
define <vscale x 4 x i32> @intrinsic_vnsra_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vnsra.wv v26, v8, v12
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32(
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vnsra_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vnsra.wv v8, v12, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i32);
define <vscale x 8 x i32> @intrinsic_vnsra_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vnsra.wv v28, v8, v16
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32(
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vnsra_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vnsra.wv v8, v16, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
<vscale x 1 x i16>,
i32,
@@ -991,6 +1171,186 @@ entry:
ret <vscale x 16 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64(
<vscale x 1 x i64>,
i32,
i32);
define <vscale x 1 x i32> @intrinsic_vnsra_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
; CHECK-NEXT: vnsra.wx v25, v8, a0
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64(
<vscale x 1 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64(
<vscale x 1 x i32>,
<vscale x 1 x i64>,
i32,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vnsra_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
; CHECK-NEXT: vnsra.wx v8, v9, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64(
<vscale x 1 x i32> %0,
<vscale x 1 x i64> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64(
<vscale x 2 x i64>,
i32,
i32);
define <vscale x 2 x i32> @intrinsic_vnsra_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
; CHECK-NEXT: vnsra.wx v25, v8, a0
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64(
<vscale x 2 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64(
<vscale x 2 x i32>,
<vscale x 2 x i64>,
i32,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vnsra_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
; CHECK-NEXT: vnsra.wx v8, v10, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64(
<vscale x 2 x i32> %0,
<vscale x 2 x i64> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64(
<vscale x 4 x i64>,
i32,
i32);
define <vscale x 4 x i32> @intrinsic_vnsra_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
; CHECK-NEXT: vnsra.wx v26, v8, a0
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64(
<vscale x 4 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64(
<vscale x 4 x i32>,
<vscale x 4 x i64>,
i32,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vnsra_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
; CHECK-NEXT: vnsra.wx v8, v12, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64(
<vscale x 4 x i32> %0,
<vscale x 4 x i64> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64(
<vscale x 8 x i64>,
i32,
i32);
define <vscale x 8 x i32> @intrinsic_vnsra_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
; CHECK-NEXT: vnsra.wx v28, v8, a0
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64(
<vscale x 8 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64(
<vscale x 8 x i32>,
<vscale x 8 x i64>,
i32,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vnsra_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
; CHECK-NEXT: vnsra.wx v8, v16, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64(
<vscale x 8 x i32> %0,
<vscale x 8 x i64> %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
define <vscale x 1 x i8> @intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8:
; CHECK: # %bb.0: # %entry
@@ -1353,3 +1713,135 @@ entry:
ret <vscale x 16 x i16> %a
}
define <vscale x 1 x i32> @intrinsic_vnsra_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vi_nxv1i32_nxv1i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vnsra.wi v25, v8, 9
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64(
<vscale x 1 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 1 x i32> %a
}
define <vscale x 1 x i32> @intrinsic_vnsra_mask_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv1i32_nxv1i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vnsra.wi v8, v9, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64(
<vscale x 1 x i32> %0,
<vscale x 1 x i64> %1,
i32 9,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i32> %a
}
define <vscale x 2 x i32> @intrinsic_vnsra_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vi_nxv2i32_nxv2i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vnsra.wi v25, v8, 9
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64(
<vscale x 2 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 2 x i32> %a
}
define <vscale x 2 x i32> @intrinsic_vnsra_mask_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv2i32_nxv2i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vnsra.wi v8, v10, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64(
<vscale x 2 x i32> %0,
<vscale x 2 x i64> %1,
i32 9,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i32> %a
}
define <vscale x 4 x i32> @intrinsic_vnsra_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vi_nxv4i32_nxv4i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vnsra.wi v26, v8, 9
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64(
<vscale x 4 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 4 x i32> %a
}
define <vscale x 4 x i32> @intrinsic_vnsra_mask_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv4i32_nxv4i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vnsra.wi v8, v12, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64(
<vscale x 4 x i32> %0,
<vscale x 4 x i64> %1,
i32 9,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i32> %a
}
define <vscale x 8 x i32> @intrinsic_vnsra_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vi_nxv8i32_nxv8i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vnsra.wi v28, v8, 9
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64(
<vscale x 8 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 8 x i32> %a
}
define <vscale x 8 x i32> @intrinsic_vnsra_mask_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv8i32_nxv8i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vnsra.wi v8, v16, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64(
<vscale x 8 x i32> %0,
<vscale x 8 x i64> %1,
i32 9,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i32> %a
}

View File

@@ -496,6 +496,186 @@ entry:
ret <vscale x 16 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i32);
define <vscale x 1 x i32> @intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vnsrl.wv v25, v8, v9
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vnsrl.wv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i32);
define <vscale x 2 x i32> @intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vnsrl.wv v25, v8, v10
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32(
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vnsrl.wv v8, v10, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i32);
define <vscale x 4 x i32> @intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vnsrl.wv v26, v8, v12
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32(
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vnsrl.wv v8, v12, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i32);
define <vscale x 8 x i32> @intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vnsrl.wv v28, v8, v16
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32(
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vnsrl.wv v8, v16, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16(
<vscale x 1 x i16>,
i32,
@@ -991,6 +1171,186 @@ entry:
ret <vscale x 16 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
<vscale x 1 x i64>,
i32,
i32);
define <vscale x 1 x i32> @intrinsic_vnsrl_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
; CHECK-NEXT: vnsrl.wx v25, v8, a0
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
<vscale x 1 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64(
<vscale x 1 x i32>,
<vscale x 1 x i64>,
i32,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vnsrl_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
; CHECK-NEXT: vnsrl.wx v8, v9, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64(
<vscale x 1 x i32> %0,
<vscale x 1 x i64> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
<vscale x 2 x i64>,
i32,
i32);
define <vscale x 2 x i32> @intrinsic_vnsrl_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
; CHECK-NEXT: vnsrl.wx v25, v8, a0
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
<vscale x 2 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64(
<vscale x 2 x i32>,
<vscale x 2 x i64>,
i32,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vnsrl_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
; CHECK-NEXT: vnsrl.wx v8, v10, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64(
<vscale x 2 x i32> %0,
<vscale x 2 x i64> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
<vscale x 4 x i64>,
i32,
i32);
define <vscale x 4 x i32> @intrinsic_vnsrl_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
; CHECK-NEXT: vnsrl.wx v26, v8, a0
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
<vscale x 4 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64(
<vscale x 4 x i32>,
<vscale x 4 x i64>,
i32,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vnsrl_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
; CHECK-NEXT: vnsrl.wx v8, v12, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64(
<vscale x 4 x i32> %0,
<vscale x 4 x i64> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
<vscale x 8 x i64>,
i32,
i32);
define <vscale x 8 x i32> @intrinsic_vnsrl_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
; CHECK-NEXT: vnsrl.wx v28, v8, a0
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
<vscale x 8 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64(
<vscale x 8 x i32>,
<vscale x 8 x i64>,
i32,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vnsrl_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vx_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
; CHECK-NEXT: vnsrl.wx v8, v16, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64(
<vscale x 8 x i32> %0,
<vscale x 8 x i64> %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
define <vscale x 1 x i8> @intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8:
; CHECK: # %bb.0: # %entry
@@ -1353,3 +1713,135 @@ entry:
ret <vscale x 16 x i16> %a
}
define <vscale x 1 x i32> @intrinsic_vnsrl_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i32_nxv1i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vnsrl.wi v25, v8, 9
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64(
<vscale x 1 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 1 x i32> %a
}
define <vscale x 1 x i32> @intrinsic_vnsrl_mask_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv1i32_nxv1i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vnsrl.wi v8, v9, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64(
<vscale x 1 x i32> %0,
<vscale x 1 x i64> %1,
i32 9,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i32> %a
}
define <vscale x 2 x i32> @intrinsic_vnsrl_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i32_nxv2i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vnsrl.wi v25, v8, 9
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64(
<vscale x 2 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 2 x i32> %a
}
define <vscale x 2 x i32> @intrinsic_vnsrl_mask_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv2i32_nxv2i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vnsrl.wi v8, v10, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64(
<vscale x 2 x i32> %0,
<vscale x 2 x i64> %1,
i32 9,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i32> %a
}
define <vscale x 4 x i32> @intrinsic_vnsrl_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i32_nxv4i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vnsrl.wi v26, v8, 9
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64(
<vscale x 4 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 4 x i32> %a
}
define <vscale x 4 x i32> @intrinsic_vnsrl_mask_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv4i32_nxv4i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vnsrl.wi v8, v12, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64(
<vscale x 4 x i32> %0,
<vscale x 4 x i64> %1,
i32 9,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i32> %a
}
define <vscale x 8 x i32> @intrinsic_vnsrl_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i32_nxv8i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vnsrl.wi v28, v8, 9
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64(
<vscale x 8 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 8 x i32> %a
}
define <vscale x 8 x i32> @intrinsic_vnsrl_mask_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vnsrl_mask_vi_nxv8i32_nxv8i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vnsrl.wi v8, v16, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64(
<vscale x 8 x i32> %0,
<vscale x 8 x i64> %1,
i32 9,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i32> %a
}

View File

@@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vor.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vor.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vor.vv v8, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vor.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vor.vv v8, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vor.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i64> @intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vor.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -1588,6 +1765,252 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i64> @intrinsic_vor_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vor.vv v8, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vor.vv v8, v9, v25, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i64> @intrinsic_vor_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vor.vv v8, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vor.vv v8, v10, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i64> @intrinsic_vor_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vor.vv v8, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vor.vv v8, v12, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
i32);
define <vscale x 8 x i64> @intrinsic_vor_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vor.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
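; Note: at LMUL=8 the splat sequence below uses v24 and v0 as temporaries, so
; the incoming mask is spilled from v0 before the sequence and reloaded into
; v0 just before the masked vor.vv.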
define <vscale x 8 x i64> @intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrrs a3, vlenb, zero
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v24, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v0, v24, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v24, v24, v0
; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl1re8.v v0, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
define <vscale x 1 x i8> @intrinsic_vor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
@@ -2163,3 +2586,131 @@ entry:
ret <vscale x 16 x i32> %a
}
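; Note: the immediate 9 fits in the 5-bit simm field of vor.vi, so the i64
; immediate tests below use the .vi form directly and need no scalar splat,
; even at SEW=64 on RV32.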
define <vscale x 1 x i64> @intrinsic_vor_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vor.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 1 x i64> %a
}
define <vscale x 1 x i64> @intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vor.vi v8, v9, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vor_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vor.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 2 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vor.vi v8, v10, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vor_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vor.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 4 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vor.vi v8, v12, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
define <vscale x 8 x i64> @intrinsic_vor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vor.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 8 x i64> %a
}
define <vscale x 8 x i64> @intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vor_mask_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
; CHECK-NEXT: vor.vi v8, v16, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 9,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}
@@ -782,3 +782,187 @@ entry:
ret <vscale x 2 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
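; Note: in the reduction tests below the scalar start value is element 0 of
; the last vector operand (v10 here, v9 for the wider sources) and only
; element 0 of the result is produced, so the vsetvli LMUL tracks the width of
; the vector being reduced while the start value and result stay in a single
; register.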
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64(
<vscale x 1 x i64>,
<vscale x 2 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vredand.vs v8, v10, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64(
<vscale x 1 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64(
<vscale x 1 x i64>,
<vscale x 2 x i64>,
<vscale x 1 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64(
<vscale x 1 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64(
<vscale x 1 x i64>,
<vscale x 4 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vredand.vs v8, v12, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64(
<vscale x 1 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64(
<vscale x 1 x i64>,
<vscale x 4 x i64>,
<vscale x 1 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64(
<vscale x 1 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64(
<vscale x 1 x i64>,
<vscale x 8 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vredand.vs v8, v16, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64(
<vscale x 1 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64(
<vscale x 1 x i64>,
<vscale x 8 x i64>,
<vscale x 1 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vredand.vs v8, v16, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64(
<vscale x 1 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
@@ -782,3 +782,187 @@ entry:
ret <vscale x 2 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64(
<vscale x 1 x i64>,
<vscale x 2 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vredmax.vs v8, v10, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64(
<vscale x 1 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64(
<vscale x 1 x i64>,
<vscale x 2 x i64>,
<vscale x 1 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64(
<vscale x 1 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64(
<vscale x 1 x i64>,
<vscale x 4 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vredmax.vs v8, v12, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64(
<vscale x 1 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64(
<vscale x 1 x i64>,
<vscale x 4 x i64>,
<vscale x 1 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64(
<vscale x 1 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64(
<vscale x 1 x i64>,
<vscale x 8 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vredmax.vs v8, v16, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64(
<vscale x 1 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64(
<vscale x 1 x i64>,
<vscale x 8 x i64>,
<vscale x 1 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64(
<vscale x 1 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
@@ -782,3 +782,187 @@ entry:
ret <vscale x 2 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64(
<vscale x 1 x i64>,
<vscale x 2 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vredmaxu.vs v8, v10, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64(
<vscale x 1 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64(
<vscale x 1 x i64>,
<vscale x 2 x i64>,
<vscale x 1 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64(
<vscale x 1 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64(
<vscale x 1 x i64>,
<vscale x 4 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vredmaxu.vs v8, v12, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64(
<vscale x 1 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64(
<vscale x 1 x i64>,
<vscale x 4 x i64>,
<vscale x 1 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64(
<vscale x 1 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64(
<vscale x 1 x i64>,
<vscale x 8 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vredmaxu.vs v8, v16, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64(
<vscale x 1 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64(
<vscale x 1 x i64>,
<vscale x 8 x i64>,
<vscale x 1 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vredmaxu.vs v8, v16, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64(
<vscale x 1 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
@@ -782,3 +782,187 @@ entry:
ret <vscale x 2 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64(
<vscale x 1 x i64>,
<vscale x 2 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vredmin.vs v8, v10, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64(
<vscale x 1 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64(
<vscale x 1 x i64>,
<vscale x 2 x i64>,
<vscale x 1 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64(
<vscale x 1 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64(
<vscale x 1 x i64>,
<vscale x 4 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vredmin.vs v8, v12, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64(
<vscale x 1 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64(
<vscale x 1 x i64>,
<vscale x 4 x i64>,
<vscale x 1 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64(
<vscale x 1 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64(
<vscale x 1 x i64>,
<vscale x 8 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vredmin.vs v8, v16, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64(
<vscale x 1 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64(
<vscale x 1 x i64>,
<vscale x 8 x i64>,
<vscale x 1 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vredmin.vs v8, v16, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64(
<vscale x 1 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
@@ -782,3 +782,187 @@ entry:
ret <vscale x 2 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64(
<vscale x 1 x i64>,
<vscale x 2 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vredminu.vs v8, v10, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64(
<vscale x 1 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64(
<vscale x 1 x i64>,
<vscale x 2 x i64>,
<vscale x 1 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64(
<vscale x 1 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64(
<vscale x 1 x i64>,
<vscale x 4 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vredminu.vs v8, v12, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64(
<vscale x 1 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64(
<vscale x 1 x i64>,
<vscale x 4 x i64>,
<vscale x 1 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64(
<vscale x 1 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64(
<vscale x 1 x i64>,
<vscale x 8 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vredminu.vs v8, v16, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64(
<vscale x 1 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64(
<vscale x 1 x i64>,
<vscale x 8 x i64>,
<vscale x 1 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vredminu.vs v8, v16, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64(
<vscale x 1 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
@@ -782,3 +782,187 @@ entry:
ret <vscale x 2 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64(
<vscale x 1 x i64>,
<vscale x 2 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vredor.vs v8, v10, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64(
<vscale x 1 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64(
<vscale x 1 x i64>,
<vscale x 2 x i64>,
<vscale x 1 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64(
<vscale x 1 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64(
<vscale x 1 x i64>,
<vscale x 4 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vredor.vs v8, v12, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64(
<vscale x 1 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64(
<vscale x 1 x i64>,
<vscale x 4 x i64>,
<vscale x 1 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64(
<vscale x 1 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64(
<vscale x 1 x i64>,
<vscale x 8 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vredor.vs v8, v16, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64(
<vscale x 1 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64(
<vscale x 1 x i64>,
<vscale x 8 x i64>,
<vscale x 1 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vredor.vs v8, v16, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64(
<vscale x 1 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
@@ -782,3 +782,187 @@ entry:
ret <vscale x 2 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64(
<vscale x 1 x i64>,
<vscale x 2 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vredsum.vs v8, v10, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64(
<vscale x 1 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64(
<vscale x 1 x i64>,
<vscale x 2 x i64>,
<vscale x 1 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64(
<vscale x 1 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64(
<vscale x 1 x i64>,
<vscale x 4 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vredsum.vs v8, v12, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64(
<vscale x 1 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64(
<vscale x 1 x i64>,
<vscale x 4 x i64>,
<vscale x 1 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64(
<vscale x 1 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64(
<vscale x 1 x i64>,
<vscale x 8 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vredsum.vs v8, v16, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64(
<vscale x 1 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64(
<vscale x 1 x i64>,
<vscale x 8 x i64>,
<vscale x 1 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64(
<vscale x 1 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
@@ -782,3 +782,187 @@ entry:
ret <vscale x 2 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64(
<vscale x 1 x i64>,
<vscale x 2 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vredxor.vs v8, v10, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64(
<vscale x 1 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64(
<vscale x 1 x i64>,
<vscale x 2 x i64>,
<vscale x 1 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64(
<vscale x 1 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64(
<vscale x 1 x i64>,
<vscale x 4 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vredxor.vs v8, v12, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64(
<vscale x 1 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64(
<vscale x 1 x i64>,
<vscale x 4 x i64>,
<vscale x 1 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64(
<vscale x 1 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64(
<vscale x 1 x i64>,
<vscale x 8 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vredxor.vs v8, v16, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64(
<vscale x 1 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64(
<vscale x 1 x i64>,
<vscale x 8 x i64>,
<vscale x 1 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vredxor.vs v8, v16, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64(
<vscale x 1 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
@@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vrem.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vrem.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vrem.vv v8, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vrem.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vrem.vv v8, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vrem.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i64> @intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vrem.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
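; Three m8 vector operands do not all fit in the v8-v23 argument registers, so the third one is passed indirectly and reloaded below with vl8re64.v before the masked vrem.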
define <vscale x 8 x i64> @intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -1587,3 +1764,249 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
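; On RV32 an i64 scalar arrives in two GPRs, so the expected code below splats each 32-bit half, positions them with vsll.vx/vsrl.vx, and ORs them together before the vector vrem.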
define <vscale x 1 x i64> @intrinsic_vrem_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vrem.vv v8, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vrem.vv v8, v9, v25, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i64> @intrinsic_vrem_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vrem.vv v8, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vrem.vv v8, v10, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i64> @intrinsic_vrem_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vrem.vv v8, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vrem.vv v8, v12, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
i32);
define <vscale x 8 x i64> @intrinsic_vrem_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrem_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vrem.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
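; The mask in v0 is spilled and reloaded around the i64-splat sequence because, at LMUL=8, the splat temporarily uses v0 as scratch.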
define <vscale x 8 x i64> @intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrem_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrrs a3, vlenb, zero
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v24, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v0, v24, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v24, v24, v0
; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl1re8.v v0, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}

@@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vremu.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vremu.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vremu.vv v8, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vremu.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vremu.vv v8, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vremu.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i64> @intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vremu.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -1587,3 +1764,249 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i64> @intrinsic_vremu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vremu.vv v8, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vremu.vv v8, v9, v25, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i64> @intrinsic_vremu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vremu.vv v8, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vremu.vv v8, v10, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i64> @intrinsic_vremu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vremu.vv v8, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vremu.vv v8, v12, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
i32);
define <vscale x 8 x i64> @intrinsic_vremu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vremu_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vremu.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vremu_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrrs a3, vlenb, zero
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v24, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v0, v24, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v24, v24, v0
; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl1re8.v v0, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}

@@ -723,6 +723,97 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i16>,
i32);
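; The gather result is presumably built in a temporary register group (v28) because a vrgather destination may not overlap its sources, then copied back to v8 with vmv4r.v.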
define <vscale x 4 x i64> @intrinsic_vrgatherei16_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vrgatherei16.vv v28, v8, v12
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i16> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vrgatherei16_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i16>,
i32);
define <vscale x 8 x i64> @intrinsic_vrgatherei16_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vrgatherei16.vv v24, v8, v16
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i16> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i32);
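; After two m8 operands the nxv8i16 index vector no longer fits in the argument registers, so it is reloaded below with vl2re16.v.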
define <vscale x 8 x i64> @intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl2re16.v v26, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vrgatherei16.vv v8, v16, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x half> @llvm.riscv.vrgatherei16.vv.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x i16>,

@@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vsadd.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vsadd.vv v8, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vsadd.vv v8, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i64> @intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vsadd.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -1588,6 +1765,252 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i64> @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsadd.vv v8, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vsadd.vv v8, v9, v25, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i64> @intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsadd.vv v8, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vsadd.vv v8, v10, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i64> @intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vsadd.vv v8, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vsadd.vv v8, v12, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
i32);
define <vscale x 8 x i64> @intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vsadd.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrrs a3, vlenb, zero
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v24, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v0, v24, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v24, v24, v0
; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl1re8.v v0, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
define <vscale x 1 x i8> @intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
@@ -2163,3 +2586,131 @@ entry:
ret <vscale x 16 x i32> %a
}
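; The constant 9 fits the 5-bit immediate form, so these tests lower straight to vsadd.vi and avoid the RV32 scalar-splat sequence used for the vx cases above.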
define <vscale x 1 x i64> @intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vsadd.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 1 x i64> %a
}
define <vscale x 1 x i64> @intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vsadd.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 2 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vsadd.vi v8, v10, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vsadd.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 4 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vsadd.vi v8, v12, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
define <vscale x 8 x i64> @intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vsadd.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 8 x i64> %a
}
define <vscale x 8 x i64> @intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
; CHECK-NEXT: vsadd.vi v8, v16, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 9,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}

@@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vsaddu.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vsaddu.vv v8, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vsaddu.vv v8, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i64> @intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vsaddu.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -1588,6 +1765,252 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i64> @intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsaddu.vv v8, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vsaddu.vv v8, v9, v25, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i64> @intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsaddu.vv v8, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vsaddu.vv v8, v10, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i64> @intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vsaddu.vv v8, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vsaddu.vv v8, v12, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
i32);
define <vscale x 8 x i64> @intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vsaddu.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrrs a3, vlenb, zero
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v24, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v0, v24, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v24, v24, v0
; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl1re8.v v0, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
define <vscale x 1 x i8> @intrinsic_vsaddu_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
@@ -2163,3 +2586,131 @@ entry:
ret <vscale x 16 x i32> %a
}
define <vscale x 1 x i64> @intrinsic_vsaddu_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vsaddu.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 1 x i64> %a
}
define <vscale x 1 x i64> @intrinsic_vsaddu_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vsaddu_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vsaddu.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 2 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vsaddu_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vsaddu.vi v8, v10, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vsaddu_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vsaddu.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 4 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vsaddu_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vsaddu.vi v8, v12, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
define <vscale x 8 x i64> @intrinsic_vsaddu_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vsaddu.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 8 x i64> %a
}
define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
; CHECK-NEXT: vsaddu.vi v8, v16, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 9,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}


@@ -397,6 +397,94 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vsbc.vvm v8, v8, v10, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vsbc.vvm v8, v8, v12, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vsbc.vvm v8, v8, v16, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -792,3 +880,119 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsbc.vvm v8, v8, v25, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsbc.vvm v8, v8, v26, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vsbc.vvm v8, v8, v28, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vsbc.vvm v8, v8, v16, v0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}


@@ -1,7 +1,343 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -mattr=+experimental-zfh \
; RUN: -mattr=+f -verify-machineinstrs \
; RUN: -mattr=+d -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare void @llvm.riscv.vse.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>*,
i32);
define void @intrinsic_vse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vse.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64>* %1,
i32 %2)
ret void
}
declare void @llvm.riscv.vse.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>*,
<vscale x 1 x i1>,
i32);
define void @intrinsic_vse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
; CHECK-NEXT: vse64.v v8, (a0), v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vse.mask.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64>* %1,
<vscale x 1 x i1> %2,
i32 %3)
ret void
}
declare void @llvm.riscv.vse.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>*,
i32);
define void @intrinsic_vse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vse.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64>* %1,
i32 %2)
ret void
}
declare void @llvm.riscv.vse.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>*,
<vscale x 2 x i1>,
i32);
define void @intrinsic_vse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
; CHECK-NEXT: vse64.v v8, (a0), v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vse.mask.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64>* %1,
<vscale x 2 x i1> %2,
i32 %3)
ret void
}
declare void @llvm.riscv.vse.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>*,
i32);
define void @intrinsic_vse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vse.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64>* %1,
i32 %2)
ret void
}
declare void @llvm.riscv.vse.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>*,
<vscale x 4 x i1>,
i32);
define void @intrinsic_vse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
; CHECK-NEXT: vse64.v v8, (a0), v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vse.mask.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64>* %1,
<vscale x 4 x i1> %2,
i32 %3)
ret void
}
declare void @llvm.riscv.vse.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>*,
i32);
define void @intrinsic_vse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vse.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64>* %1,
i32 %2)
ret void
}
declare void @llvm.riscv.vse.mask.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>*,
<vscale x 8 x i1>,
i32);
define void @intrinsic_vse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
; CHECK-NEXT: vse64.v v8, (a0), v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vse.mask.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64>* %1,
<vscale x 8 x i1> %2,
i32 %3)
ret void
}
declare void @llvm.riscv.vse.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>*,
i32);
define void @intrinsic_vse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vse.nxv1f64(
<vscale x 1 x double> %0,
<vscale x 1 x double>* %1,
i32 %2)
ret void
}
declare void @llvm.riscv.vse.mask.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>*,
<vscale x 1 x i1>,
i32);
define void @intrinsic_vse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
; CHECK-NEXT: vse64.v v8, (a0), v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vse.mask.nxv1f64(
<vscale x 1 x double> %0,
<vscale x 1 x double>* %1,
<vscale x 1 x i1> %2,
i32 %3)
ret void
}
declare void @llvm.riscv.vse.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>*,
i32);
define void @intrinsic_vse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vse.nxv2f64(
<vscale x 2 x double> %0,
<vscale x 2 x double>* %1,
i32 %2)
ret void
}
declare void @llvm.riscv.vse.mask.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>*,
<vscale x 2 x i1>,
i32);
define void @intrinsic_vse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
; CHECK-NEXT: vse64.v v8, (a0), v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vse.mask.nxv2f64(
<vscale x 2 x double> %0,
<vscale x 2 x double>* %1,
<vscale x 2 x i1> %2,
i32 %3)
ret void
}
declare void @llvm.riscv.vse.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>*,
i32);
define void @intrinsic_vse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vse.nxv4f64(
<vscale x 4 x double> %0,
<vscale x 4 x double>* %1,
i32 %2)
ret void
}
declare void @llvm.riscv.vse.mask.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>*,
<vscale x 4 x i1>,
i32);
define void @intrinsic_vse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
; CHECK-NEXT: vse64.v v8, (a0), v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vse.mask.nxv4f64(
<vscale x 4 x double> %0,
<vscale x 4 x double>* %1,
<vscale x 4 x i1> %2,
i32 %3)
ret void
}
declare void @llvm.riscv.vse.nxv8f64(
<vscale x 8 x double>,
<vscale x 8 x double>*,
i32);
define void @intrinsic_vse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vse_v_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vse.nxv8f64(
<vscale x 8 x double> %0,
<vscale x 8 x double>* %1,
i32 %2)
ret void
}
declare void @llvm.riscv.vse.mask.nxv8f64(
<vscale x 8 x double>,
<vscale x 8 x double>*,
<vscale x 8 x i1>,
i32);
define void @intrinsic_vse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
; CHECK-NEXT: vse64.v v8, (a0), v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vse.mask.nxv8f64(
<vscale x 8 x double> %0,
<vscale x 8 x double>* %1,
<vscale x 8 x i1> %2,
i32 %3)
ret void
}
declare void @llvm.riscv.vse.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>*,


@@ -1,6 +1,334 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
<vscale x 1 x i8>,
i32);
define <vscale x 1 x i64> @intrinsic_vsext_vf8_nxv1i64(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsext_vf8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vsext.vf8 v25, v8
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
<vscale x 1 x i8> %0,
i32 %1)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8(
<vscale x 1 x i64>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vsext_mask_vf8_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vsext.vf8 v8, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8(
<vscale x 1 x i64> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %0,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8(
<vscale x 2 x i8>,
i32);
define <vscale x 2 x i64> @intrinsic_vsext_vf8_nxv2i64(<vscale x 2 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsext_vf8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vsext.vf8 v26, v8
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8(
<vscale x 2 x i8> %0,
i32 %1)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8(
<vscale x 2 x i64>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vsext_mask_vf8_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vsext.vf8 v8, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8(
<vscale x 2 x i64> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %0,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8(
<vscale x 4 x i8>,
i32);
define <vscale x 4 x i64> @intrinsic_vsext_vf8_nxv4i64(<vscale x 4 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsext_vf8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vsext.vf8 v28, v8
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8(
<vscale x 4 x i8> %0,
i32 %1)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8(
<vscale x 4 x i64>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vsext_mask_vf8_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vsext.vf8 v8, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8(
<vscale x 4 x i64> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %0,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8(
<vscale x 8 x i8>,
i32);
define <vscale x 8 x i64> @intrinsic_vsext_vf8_nxv8i64(<vscale x 8 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsext_vf8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vsext.vf8 v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8(
<vscale x 8 x i8> %0,
i32 %1)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8(
<vscale x 8 x i64>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vsext_mask_vf8_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
; CHECK-NEXT: vsext.vf8 v8, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8(
<vscale x 8 x i64> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %0,
i32 %3)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16(
<vscale x 1 x i16>,
i32);
define <vscale x 1 x i64> @intrinsic_vsext_vf4_nxv1i64(<vscale x 1 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsext_vf4_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vsext.vf4 v25, v8
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16(
<vscale x 1 x i16> %0,
i32 %1)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i16(
<vscale x 1 x i64>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vsext_mask_vf4_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vsext.vf4 v8, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i16(
<vscale x 1 x i64> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %0,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16(
<vscale x 2 x i16>,
i32);
define <vscale x 2 x i64> @intrinsic_vsext_vf4_nxv2i64(<vscale x 2 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsext_vf4_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vsext.vf4 v26, v8
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16(
<vscale x 2 x i16> %0,
i32 %1)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i16(
<vscale x 2 x i64>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vsext_mask_vf4_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vsext.vf4 v8, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i16(
<vscale x 2 x i64> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %0,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16(
<vscale x 4 x i16>,
i32);
define <vscale x 4 x i64> @intrinsic_vsext_vf4_nxv4i64(<vscale x 4 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsext_vf4_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vsext.vf4 v28, v8
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16(
<vscale x 4 x i16> %0,
i32 %1)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i16(
<vscale x 4 x i64>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vsext_mask_vf4_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vsext.vf4 v8, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i16(
<vscale x 4 x i64> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %0,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16(
<vscale x 8 x i16>,
i32);
define <vscale x 8 x i64> @intrinsic_vsext_vf4_nxv8i64(<vscale x 8 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsext_vf4_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vsext.vf4 v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16(
<vscale x 8 x i16> %0,
i32 %1)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i16(
<vscale x 8 x i64>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vsext_mask_vf4_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
; CHECK-NEXT: vsext.vf4 v8, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i16(
<vscale x 8 x i64> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %0,
i32 %3)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8(
<vscale x 1 x i8>,
i32);


@@ -1186,6 +1186,243 @@ entry:
ret <vscale x 8 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
i32);
define <vscale x 1 x i64> @intrinsic_vslidedown_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
; CHECK-NEXT: vslidedown.vx v8, v9, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
define <vscale x 1 x i64> @intrinsic_vslidedown_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 9,
i32 %2)
ret <vscale x 1 x i64> %a
}
define <vscale x 1 x i64> @intrinsic_vslidedown_mask_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vslidedown.vi v8, v9, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 9,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vslidedown_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
; CHECK-NEXT: vslidedown.vx v8, v10, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
; CHECK-NEXT: vslidedown.vx v8, v10, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vslidedown_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vslidedown.vi v8, v10, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 9,
i32 %2)
ret <vscale x 2 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vslidedown_mask_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vslidedown.vi v8, v10, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 9,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vslidedown_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
; CHECK-NEXT: vslidedown.vx v8, v12, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
; CHECK-NEXT: vslidedown.vx v8, v12, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vslidedown_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vslidedown.vi v8, v12, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 9,
i32 %2)
ret <vscale x 4 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vslidedown_mask_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vslidedown.vi v8, v12, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 9,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,


@@ -1186,6 +1186,243 @@ entry:
ret <vscale x 8 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
i32);
define <vscale x 1 x i64> @intrinsic_vslideup_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
define <vscale x 1 x i64> @intrinsic_vslideup_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vslideup.vi v8, v9, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 9,
i32 %2)
ret <vscale x 1 x i64> %a
}
define <vscale x 1 x i64> @intrinsic_vslideup_mask_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vslideup.vi v8, v9, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 9,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vslideup_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
; CHECK-NEXT: vslideup.vx v8, v10, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vslideup_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vslideup.vi v8, v10, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 9,
i32 %2)
ret <vscale x 2 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vslideup_mask_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vslideup.vi v8, v10, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 9,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vslideup_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
; CHECK-NEXT: vslideup.vx v8, v12, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
; CHECK-NEXT: vslideup.vx v8, v12, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vslideup_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vslideup.vi v8, v12, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 9,
i32 %2)
ret <vscale x 4 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vslideup_mask_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vslideup.vi v8, v12, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 9,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,


@@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vsll.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vsll.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vsll.vv v8, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vsll.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vsll.vv v8, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vsll.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i64> @intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vsll.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
<vscale x 1 x i8>,
i32,
@@ -1588,6 +1765,182 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
<vscale x 1 x i64>,
i32,
i32);
define <vscale x 1 x i64> @intrinsic_vsll_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
<vscale x 1 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vsll_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
; CHECK-NEXT: vsll.vx v8, v9, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
<vscale x 2 x i64>,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vsll_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
<vscale x 2 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vsll_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
; CHECK-NEXT: vsll.vx v8, v10, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
<vscale x 4 x i64>,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vsll_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
<vscale x 4 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vsll_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
; CHECK-NEXT: vsll.vx v8, v12, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
<vscale x 8 x i64>,
i32,
i32);
define <vscale x 8 x i64> @intrinsic_vsll_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsll_vx_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
; CHECK-NEXT: vsll.vx v8, v8, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
<vscale x 8 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vsll_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
; CHECK-NEXT: vsll.vx v8, v16, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
define <vscale x 1 x i8> @intrinsic_vsll_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
@@ -2163,3 +2516,131 @@ entry:
ret <vscale x 16 x i32> %a
}
define <vscale x 1 x i64> @intrinsic_vsll_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vsll.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
<vscale x 1 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 1 x i64> %a
}
define <vscale x 1 x i64> @intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vsll.vi v8, v9, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 9,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vsll_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vsll.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
<vscale x 2 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 2 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vsll.vi v8, v10, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 9,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vsll_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vsll.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
<vscale x 4 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 4 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vsll.vi v8, v12, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 9,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
define <vscale x 8 x i64> @intrinsic_vsll_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vsll.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
<vscale x 8 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 8 x i64> %a
}
define <vscale x 8 x i64> @intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
; CHECK-NEXT: vsll.vi v8, v16, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 9,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}


@@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vsmul.vv v8, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vsmul.vv v8, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i64> @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vsmul.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
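; Note: in the masked LMUL=8 vector-vector tests above, v8 holds the merge operand
; and v16 the first source, so the second nxv8i64 source no longer fits in a free
; register group; it appears to be passed indirectly and is reloaded with vl8re64.v
; from a0, while the AVL arrives in a1 instead of a0.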
declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -1587,3 +1764,249 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsmul.vv v8, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
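; Note: on RV32 the i64 scalar operand is split across a0 (low word) and a1 (high
; word), so the sequence above builds the 64-bit splat in a vector register: splat
; the high word and shift it left by 32, splat the low word and zero-extend it with
; a shift-left/shift-right-by-32 pair, then OR the two halves before the vsmul.vv.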
declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vsmul.vv v8, v9, v25, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i64> @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsmul.vv v8, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vsmul.vv v8, v10, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i64> @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vsmul.vv v8, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vsmul.vv v8, v12, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
i32);
define <vscale x 8 x i64> @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vsmul.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrrs a3, vlenb, zero
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v24, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v0, v24, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v24, v24, v0
; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl1re8.v v0, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
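; Note: at LMUL=8 the splat construction above needs two full m8 register groups as
; scratch; with v8 and v16 taken by the arguments, only v24-v31 and the group that
; contains v0 are left, so the mask in v0 is spilled to the stack (vs1r.v) and
; reloaded (vl1re8.v) just before the masked vsmul.vv.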

File diff suppressed because it is too large


@@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vsra.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vsra.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vsra.vv v8, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vsra.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vsra.vv v8, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vsra.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i64> @intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vsra.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
<vscale x 1 x i8>,
i32,
@@ -1588,6 +1765,182 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
<vscale x 1 x i64>,
i32,
i32);
define <vscale x 1 x i64> @intrinsic_vsra_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsra_vx_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
<vscale x 1 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vsra_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
; CHECK-NEXT: vsra.vx v8, v9, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
<vscale x 2 x i64>,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vsra_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsra_vx_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
<vscale x 2 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vsra_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
; CHECK-NEXT: vsra.vx v8, v10, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
<vscale x 4 x i64>,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vsra_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsra_vx_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
<vscale x 4 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vsra_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
; CHECK-NEXT: vsra.vx v8, v12, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
<vscale x 8 x i64>,
i32,
i32);
define <vscale x 8 x i64> @intrinsic_vsra_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsra_vx_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
; CHECK-NEXT: vsra.vx v8, v8, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
<vscale x 8 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vsra_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
; CHECK-NEXT: vsra.vx v8, v16, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
define <vscale x 1 x i8> @intrinsic_vsra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsra_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
@@ -2163,3 +2516,131 @@ entry:
ret <vscale x 16 x i32> %a
}
define <vscale x 1 x i64> @intrinsic_vsra_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsra_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vsra.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
<vscale x 1 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 1 x i64> %a
}
define <vscale x 1 x i64> @intrinsic_vsra_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vsra.vi v8, v9, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 9,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vsra_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsra_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vsra.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
<vscale x 2 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 2 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vsra_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vsra.vi v8, v10, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 9,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vsra_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsra_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vsra.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
<vscale x 4 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 4 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vsra_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vsra.vi v8, v12, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 9,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
define <vscale x 8 x i64> @intrinsic_vsra_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsra_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vsra.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
<vscale x 8 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 8 x i64> %a
}
define <vscale x 8 x i64> @intrinsic_vsra_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
; CHECK-NEXT: vsra.vi v8, v16, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 9,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}


@@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vsrl.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vsrl_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vsrl.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vsrl.vv v8, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vsrl_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vsrl.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vsrl.vv v8, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vsrl_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vsrl.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i64> @intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vsrl.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
<vscale x 1 x i8>,
i32,
@@ -1588,6 +1765,182 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
<vscale x 1 x i64>,
i32,
i32);
define <vscale x 1 x i64> @intrinsic_vsrl_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
<vscale x 1 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
; CHECK-NEXT: vsrl.vx v8, v9, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
<vscale x 2 x i64>,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vsrl_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
<vscale x 2 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
; CHECK-NEXT: vsrl.vx v8, v10, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
<vscale x 4 x i64>,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vsrl_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
<vscale x 4 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
; CHECK-NEXT: vsrl.vx v8, v12, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
<vscale x 8 x i64>,
i32,
i32);
define <vscale x 8 x i64> @intrinsic_vsrl_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
; CHECK-NEXT: vsrl.vx v8, v8, a0
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
<vscale x 8 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
; CHECK-NEXT: vsrl.vx v8, v16, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
define <vscale x 1 x i8> @intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
@@ -2163,3 +2516,131 @@ entry:
ret <vscale x 16 x i32> %a
}
define <vscale x 1 x i64> @intrinsic_vsrl_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vsrl.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
<vscale x 1 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 1 x i64> %a
}
define <vscale x 1 x i64> @intrinsic_vsrl_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vsrl.vi v8, v9, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 9,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vsrl_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vsrl.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
<vscale x 2 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 2 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vsrl_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vsrl.vi v8, v10, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 9,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vsrl_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vsrl.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
<vscale x 4 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 4 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vsrl_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vsrl.vi v8, v12, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 9,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
define <vscale x 8 x i64> @intrinsic_vsrl_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vsrl.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
<vscale x 8 x i64> %0,
i32 9,
i32 %1)
ret <vscale x 8 x i64> %a
}
define <vscale x 8 x i64> @intrinsic_vsrl_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
; CHECK-NEXT: vsrl.vi v8, v16, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 9,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}


@@ -1,6 +1,374 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
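; Note: the two RUN lines above differ only in -mattr (+f vs. +d); the D extension
; is needed for the f64 strided-store tests later in this file.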
declare void @llvm.riscv.vsse.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>*,
i32,
i32);
define void @intrinsic_vsse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vsse64.v v8, (a0), a1
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vsse.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64>* %1,
i32 %2,
i32 %3)
ret void
}
declare void @llvm.riscv.vsse.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>*,
i32,
<vscale x 1 x i1>,
i32);
define void @intrinsic_vsse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vsse.mask.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64>* %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret void
}
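; Note: vsse64.v v8, (a0), a1 is a strided store of the 64-bit elements of v8
; starting at the address in a0 with a byte stride taken from a1; the masked form
; adds v0.t so that only active elements are written to memory.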
declare void @llvm.riscv.vsse.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>*,
i32,
i32);
define void @intrinsic_vsse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vsse64.v v8, (a0), a1
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vsse.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64>* %1,
i32 %2,
i32 %3)
ret void
}
declare void @llvm.riscv.vsse.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>*,
i32,
<vscale x 2 x i1>,
i32);
define void @intrinsic_vsse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vsse.mask.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64>* %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret void
}
declare void @llvm.riscv.vsse.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>*,
i32,
i32);
define void @intrinsic_vsse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vsse64.v v8, (a0), a1
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vsse.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64>* %1,
i32 %2,
i32 %3)
ret void
}
declare void @llvm.riscv.vsse.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>*,
i32,
<vscale x 4 x i1>,
i32);
define void @intrinsic_vsse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vsse.mask.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64>* %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret void
}
declare void @llvm.riscv.vsse.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>*,
i32,
i32);
define void @intrinsic_vsse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vsse64.v v8, (a0), a1
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vsse.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64>* %1,
i32 %2,
i32 %3)
ret void
}
declare void @llvm.riscv.vsse.mask.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>*,
i32,
<vscale x 8 x i1>,
i32);
define void @intrinsic_vsse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vsse.mask.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64>* %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret void
}
declare void @llvm.riscv.vsse.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>*,
i32,
i32);
define void @intrinsic_vsse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vsse64.v v8, (a0), a1
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vsse.nxv1f64(
<vscale x 1 x double> %0,
<vscale x 1 x double>* %1,
i32 %2,
i32 %3)
ret void
}
declare void @llvm.riscv.vsse.mask.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>*,
i32,
<vscale x 1 x i1>,
i32);
define void @intrinsic_vsse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vsse.mask.nxv1f64(
<vscale x 1 x double> %0,
<vscale x 1 x double>* %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret void
}
declare void @llvm.riscv.vsse.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>*,
i32,
i32);
define void @intrinsic_vsse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vsse64.v v8, (a0), a1
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vsse.nxv2f64(
<vscale x 2 x double> %0,
<vscale x 2 x double>* %1,
i32 %2,
i32 %3)
ret void
}
declare void @llvm.riscv.vsse.mask.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>*,
i32,
<vscale x 2 x i1>,
i32);
define void @intrinsic_vsse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vsse.mask.nxv2f64(
<vscale x 2 x double> %0,
<vscale x 2 x double>* %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret void
}
declare void @llvm.riscv.vsse.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>*,
i32,
i32);
define void @intrinsic_vsse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vsse64.v v8, (a0), a1
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vsse.nxv4f64(
<vscale x 4 x double> %0,
<vscale x 4 x double>* %1,
i32 %2,
i32 %3)
ret void
}
declare void @llvm.riscv.vsse.mask.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>*,
i32,
<vscale x 4 x i1>,
i32);
define void @intrinsic_vsse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vsse.mask.nxv4f64(
<vscale x 4 x double> %0,
<vscale x 4 x double>* %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret void
}
declare void @llvm.riscv.vsse.nxv8f64(
<vscale x 8 x double>,
<vscale x 8 x double>*,
i32,
i32);
define void @intrinsic_vsse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsse_v_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vsse64.v v8, (a0), a1
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vsse.nxv8f64(
<vscale x 8 x double> %0,
<vscale x 8 x double>* %1,
i32 %2,
i32 %3)
ret void
}
declare void @llvm.riscv.vsse.mask.nxv8f64(
<vscale x 8 x double>,
<vscale x 8 x double>*,
i32,
<vscale x 8 x i1>,
i32);
define void @intrinsic_vsse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f64_nxv8f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
call void @llvm.riscv.vsse.mask.nxv8f64(
<vscale x 8 x double> %0,
<vscale x 8 x double>* %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret void
}
declare void @llvm.riscv.vsse.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>*,


@@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vssub.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vssub.vv v8, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vssub.vv v8, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i64> @intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vssub.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -1587,3 +1764,249 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i64> @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vssub.vv v8, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vssub.vv v8, v9, v25, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i64> @intrinsic_vssub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vssub.vv v8, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vssub.vv v8, v10, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i64> @intrinsic_vssub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vssub.vv v8, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vssub.vv v8, v12, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
i32);
define <vscale x 8 x i64> @intrinsic_vssub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vssub.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
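; Note: in the masked m8 .vx case the v0 register group is reused as a temporary
; while assembling the splatted i64 operand, so the mask is spilled to the stack
; and reloaded just before the masked vssub.vv.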
declare <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrrs a3, vlenb, zero
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v24, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v0, v24, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v24, v24, v0
; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl1re8.v v0, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
@@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vssubu.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vssubu.vv v8, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vssubu.vv v8, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i64> @intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vssubu.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -1587,3 +1764,249 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i64> @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vssubu.vv v8, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vssubu.vv v8, v9, v25, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i64> @intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vssubu.vv v8, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vssubu.vv v8, v10, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i64> @intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vssubu.vv v8, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vssubu.vv v8, v12, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
i32);
define <vscale x 8 x i64> @intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vssubu.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrrs a3, vlenb, zero
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v24, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v0, v24, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v24, v24, v0
; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl1re8.v v0, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
@@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vsub.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vsub.vv v8, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vsub.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vsub.vv v8, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vsub.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i64> @intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vsub.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -1588,6 +1765,252 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i64> @intrinsic_vsub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsub.vv v8, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vsub.vv v8, v9, v25, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i64> @intrinsic_vsub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsub.vv v8, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vsub.vv v8, v10, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i64> @intrinsic_vsub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vsub.vv v8, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vsub.vv v8, v12, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
i32);
define <vscale x 8 x i64> @intrinsic_vsub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsub_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vsub.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrrs a3, vlenb, zero
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v24, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v0, v24, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v24, v24, v0
; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl1re8.v v0, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
define <vscale x 1 x i8> @intrinsic_vsub_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
@@ -2163,3 +2586,131 @@ entry:
ret <vscale x 16 x i32> %a
}
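; Note: there is no vsub.vi encoding in the V extension, so the .vi tests below
; expect vadd.vi with the negated immediate, -9.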
define <vscale x 1 x i64> @intrinsic_vsub_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 1 x i64> %a
}
define <vscale x 1 x i64> @intrinsic_vsub_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vsub_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 2 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vsub_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vsub_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 4 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vsub_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
define <vscale x 8 x i64> @intrinsic_vsub_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsub_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vadd.vi v8, v8, -9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 8 x i64> %a
}
define <vscale x 8 x i64> @intrinsic_vsub_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 9,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}

File diff suppressed because it is too large
@@ -498,6 +498,187 @@ entry:
ret <vscale x 16 x i32> %a
}
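; Note: for the unmasked .wv tests the widened result is produced in a temporary
; register group and copied back with vmv1r.v/vmv2r.v/vmv4r.v/vmv8r.v,
; presumably to satisfy the overlap constraints between the wide destination and
; the narrower source operand.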
declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i32);
define <vscale x 1 x i64> @intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vwadd.wv v25, v8, v9
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vwadd.wv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i32);
define <vscale x 2 x i64> @intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vwadd.wv v26, v8, v10
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vwadd.wv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i32);
define <vscale x 4 x i64> @intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vwadd.wv v28, v8, v12
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vwadd.wv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i32);
define <vscale x 8 x i64> @intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vwadd.wv v24, v8, v16
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl4re32.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu
; CHECK-NEXT: vwadd.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8(
<vscale x 1 x i16>,
i8,
@@ -992,3 +1173,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32(
<vscale x 1 x i64>,
i32,
i32);
define <vscale x 1 x i64> @intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
; CHECK-NEXT: vwadd.wx v25, v8, a0
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32(
<vscale x 1 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
; CHECK-NEXT: vwadd.wx v8, v9, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32(
<vscale x 2 x i64>,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
; CHECK-NEXT: vwadd.wx v26, v8, a0
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32(
<vscale x 2 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
; CHECK-NEXT: vwadd.wx v8, v10, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32(
<vscale x 4 x i64>,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
; CHECK-NEXT: vwadd.wx v28, v8, a0
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32(
<vscale x 4 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
; CHECK-NEXT: vwadd.wx v8, v12, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32(
<vscale x 8 x i64>,
i32,
i32);
define <vscale x 8 x i64> @intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
; CHECK-NEXT: vwadd.wx v16, v8, a0
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32(
<vscale x 8 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
; CHECK-NEXT: vwadd.wx v8, v16, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
@@ -507,6 +507,190 @@ entry:
ret <vscale x 16 x i32> %a
}
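; Note: the multiply-accumulate tests expect a tail-undisturbed (tu) vsetvli
; even in the unmasked form, reflecting that the destination register group also
; supplies the accumulator operand.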
declare <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
define <vscale x 1 x i64> @intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vwmacc.vv v8, v9, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vwmacc.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
define <vscale x 2 x i64> @intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vwmacc.vv v8, v10, v11
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32(
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vwmacc.vv v8, v10, v11, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32(
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
define <vscale x 4 x i64> @intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vwmacc.vv v8, v12, v14
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32(
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vwmacc.vv v8, v12, v14, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32(
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
define <vscale x 8 x i64> @intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vwmacc.vv v8, v16, v20
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32(
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vwmacc.vv v8, v16, v20, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32(
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8(
<vscale x 1 x i16>,
i8,
@@ -1012,3 +1196,187 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32(
<vscale x 1 x i64>,
i32,
<vscale x 1 x i32>,
i32);
define <vscale x 1 x i64> @intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
; CHECK-NEXT: vwmacc.vx v8, a0, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32(
<vscale x 1 x i64> %0,
i32 %1,
<vscale x 1 x i32> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32(
<vscale x 1 x i64>,
i32,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
; CHECK-NEXT: vwmacc.vx v8, a0, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32(
<vscale x 1 x i64> %0,
i32 %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32(
<vscale x 2 x i64>,
i32,
<vscale x 2 x i32>,
i32);
define <vscale x 2 x i64> @intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
; CHECK-NEXT: vwmacc.vx v8, a0, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32(
<vscale x 2 x i64> %0,
i32 %1,
<vscale x 2 x i32> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32(
<vscale x 2 x i64>,
i32,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
; CHECK-NEXT: vwmacc.vx v8, a0, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32(
<vscale x 2 x i64> %0,
i32 %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32(
<vscale x 4 x i64>,
i32,
<vscale x 4 x i32>,
i32);
define <vscale x 4 x i64> @intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
; CHECK-NEXT: vwmacc.vx v8, a0, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32(
<vscale x 4 x i64> %0,
i32 %1,
<vscale x 4 x i32> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32(
<vscale x 4 x i64>,
i32,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
; CHECK-NEXT: vwmacc.vx v8, a0, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32(
<vscale x 4 x i64> %0,
i32 %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32(
<vscale x 8 x i64>,
i32,
<vscale x 8 x i32>,
i32);
define <vscale x 8 x i64> @intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
; CHECK-NEXT: vwmacc.vx v8, a0, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32(
<vscale x 8 x i64> %0,
i32 %1,
<vscale x 8 x i32> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32(
<vscale x 8 x i64>,
i32,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
; CHECK-NEXT: vwmacc.vx v8, a0, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32(
<vscale x 8 x i64> %0,
i32 %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}

@@ -507,6 +507,190 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
define <vscale x 1 x i64> @intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vwmaccsu.vv v8, v9, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vwmaccsu.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
define <vscale x 2 x i64> @intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vwmaccsu.vv v8, v10, v11
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32(
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vwmaccsu.vv v8, v10, v11, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32(
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
define <vscale x 4 x i64> @intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vwmaccsu.vv v8, v12, v14
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32(
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vwmaccsu.vv v8, v12, v14, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32(
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
define <vscale x 8 x i64> @intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vwmaccsu.vv v8, v16, v20
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32(
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vwmaccsu.vv v8, v16, v20, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32(
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8(
<vscale x 1 x i16>,
i8,
@@ -1012,3 +1196,187 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32(
<vscale x 1 x i64>,
i32,
<vscale x 1 x i32>,
i32);
define <vscale x 1 x i64> @intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
; CHECK-NEXT: vwmaccsu.vx v8, a0, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32(
<vscale x 1 x i64> %0,
i32 %1,
<vscale x 1 x i32> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32(
<vscale x 1 x i64>,
i32,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
; CHECK-NEXT: vwmaccsu.vx v8, a0, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32(
<vscale x 1 x i64> %0,
i32 %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32(
<vscale x 2 x i64>,
i32,
<vscale x 2 x i32>,
i32);
define <vscale x 2 x i64> @intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
; CHECK-NEXT: vwmaccsu.vx v8, a0, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32(
<vscale x 2 x i64> %0,
i32 %1,
<vscale x 2 x i32> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32(
<vscale x 2 x i64>,
i32,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
; CHECK-NEXT: vwmaccsu.vx v8, a0, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32(
<vscale x 2 x i64> %0,
i32 %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32(
<vscale x 4 x i64>,
i32,
<vscale x 4 x i32>,
i32);
define <vscale x 4 x i64> @intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
; CHECK-NEXT: vwmaccsu.vx v8, a0, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32(
<vscale x 4 x i64> %0,
i32 %1,
<vscale x 4 x i32> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32(
<vscale x 4 x i64>,
i32,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
; CHECK-NEXT: vwmaccsu.vx v8, a0, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32(
<vscale x 4 x i64> %0,
i32 %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32(
<vscale x 8 x i64>,
i32,
<vscale x 8 x i32>,
i32);
define <vscale x 8 x i64> @intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
; CHECK-NEXT: vwmaccsu.vx v8, a0, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32(
<vscale x 8 x i64> %0,
i32 %1,
<vscale x 8 x i32> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32(
<vscale x 8 x i64>,
i32,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
; CHECK-NEXT: vwmaccsu.vx v8, a0, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32(
<vscale x 8 x i64> %0,
i32 %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}

@@ -507,6 +507,190 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
define <vscale x 1 x i64> @intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vwmaccu.vv v8, v9, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vwmaccu.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
define <vscale x 2 x i64> @intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vwmaccu.vv v8, v10, v11
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32(
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vwmaccu.vv v8, v10, v11, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32(
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
define <vscale x 4 x i64> @intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vwmaccu.vv v8, v12, v14
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32(
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vwmaccu.vv v8, v12, v14, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32(
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
define <vscale x 8 x i64> @intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vwmaccu.vv v8, v16, v20
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32(
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vwmaccu.vv v8, v16, v20, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32(
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8(
<vscale x 1 x i16>,
i8,
@@ -1012,3 +1196,187 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32(
<vscale x 1 x i64>,
i32,
<vscale x 1 x i32>,
i32);
define <vscale x 1 x i64> @intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
; CHECK-NEXT: vwmaccu.vx v8, a0, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32(
<vscale x 1 x i64> %0,
i32 %1,
<vscale x 1 x i32> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32(
<vscale x 1 x i64>,
i32,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
; CHECK-NEXT: vwmaccu.vx v8, a0, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32(
<vscale x 1 x i64> %0,
i32 %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32(
<vscale x 2 x i64>,
i32,
<vscale x 2 x i32>,
i32);
define <vscale x 2 x i64> @intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
; CHECK-NEXT: vwmaccu.vx v8, a0, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32(
<vscale x 2 x i64> %0,
i32 %1,
<vscale x 2 x i32> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32(
<vscale x 2 x i64>,
i32,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
; CHECK-NEXT: vwmaccu.vx v8, a0, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32(
<vscale x 2 x i64> %0,
i32 %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32(
<vscale x 4 x i64>,
i32,
<vscale x 4 x i32>,
i32);
define <vscale x 4 x i64> @intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
; CHECK-NEXT: vwmaccu.vx v8, a0, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32(
<vscale x 4 x i64> %0,
i32 %1,
<vscale x 4 x i32> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32(
<vscale x 4 x i64>,
i32,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
; CHECK-NEXT: vwmaccu.vx v8, a0, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32(
<vscale x 4 x i64> %0,
i32 %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32(
<vscale x 8 x i64>,
i32,
<vscale x 8 x i32>,
i32);
define <vscale x 8 x i64> @intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
; CHECK-NEXT: vwmaccu.vx v8, a0, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32(
<vscale x 8 x i64> %0,
i32 %1,
<vscale x 8 x i32> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32(
<vscale x 8 x i64>,
i32,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
; CHECK-NEXT: vwmaccu.vx v8, a0, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32(
<vscale x 8 x i64> %0,
i32 %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}

@@ -506,3 +506,187 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32(
<vscale x 1 x i64>,
i32,
<vscale x 1 x i32>,
i32);
define <vscale x 1 x i64> @intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
; CHECK-NEXT: vwmaccus.vx v8, a0, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32(
<vscale x 1 x i64> %0,
i32 %1,
<vscale x 1 x i32> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32(
<vscale x 1 x i64>,
i32,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
; CHECK-NEXT: vwmaccus.vx v8, a0, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32(
<vscale x 1 x i64> %0,
i32 %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32(
<vscale x 2 x i64>,
i32,
<vscale x 2 x i32>,
i32);
define <vscale x 2 x i64> @intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
; CHECK-NEXT: vwmaccus.vx v8, a0, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32(
<vscale x 2 x i64> %0,
i32 %1,
<vscale x 2 x i32> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32(
<vscale x 2 x i64>,
i32,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
; CHECK-NEXT: vwmaccus.vx v8, a0, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32(
<vscale x 2 x i64> %0,
i32 %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32(
<vscale x 4 x i64>,
i32,
<vscale x 4 x i32>,
i32);
define <vscale x 4 x i64> @intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
; CHECK-NEXT: vwmaccus.vx v8, a0, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32(
<vscale x 4 x i64> %0,
i32 %1,
<vscale x 4 x i32> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32(
<vscale x 4 x i64>,
i32,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
; CHECK-NEXT: vwmaccus.vx v8, a0, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32(
<vscale x 4 x i64> %0,
i32 %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32(
<vscale x 8 x i64>,
i32,
<vscale x 8 x i32>,
i32);
define <vscale x 8 x i64> @intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
; CHECK-NEXT: vwmaccus.vx v8, a0, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32(
<vscale x 8 x i64> %0,
i32 %1,
<vscale x 8 x i32> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32(
<vscale x 8 x i64>,
i32,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
; CHECK-NEXT: vwmaccus.vx v8, a0, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32(
<vscale x 8 x i64> %0,
i32 %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}

@@ -496,6 +496,186 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
define <vscale x 1 x i64> @intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vwmul.vv v25, v8, v9
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i64_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vwmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
define <vscale x 2 x i64> @intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vwmul.vv v26, v8, v9
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv2i64_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vwmul.vv v8, v10, v11, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32(
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
define <vscale x 4 x i64> @intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vwmul.vv v28, v8, v10
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv4i64_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vwmul.vv v8, v12, v14, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32(
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
define <vscale x 8 x i64> @intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vwmul.vv v16, v8, v12
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv8i64_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vwmul.vv v8, v16, v20, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32(
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -990,3 +1170,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
i32);
define <vscale x 1 x i64> @intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
; CHECK-NEXT: vwmul.vx v25, v8, a0
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv1i64_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
; CHECK-NEXT: vwmul.vx v8, v9, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
; CHECK-NEXT: vwmul.vx v26, v8, a0
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv2i64_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
; CHECK-NEXT: vwmul.vx v8, v10, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32(
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
; CHECK-NEXT: vwmul.vx v28, v8, a0
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv4i64_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
; CHECK-NEXT: vwmul.vx v8, v12, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32(
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
i32);
define <vscale x 8 x i64> @intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
; CHECK-NEXT: vwmul.vx v16, v8, a0
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vx_nxv8i64_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
; CHECK-NEXT: vwmul.vx v8, v16, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32(
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}

@@ -496,6 +496,186 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
define <vscale x 1 x i64> @intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vwmulsu.vv v25, v8, v9
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vwmulsu.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
define <vscale x 2 x i64> @intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vwmulsu.vv v26, v8, v9
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vwmulsu.vv v8, v10, v11, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32(
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
define <vscale x 4 x i64> @intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vwmulsu.vv v28, v8, v10
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vwmulsu.vv v8, v12, v14, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32(
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
define <vscale x 8 x i64> @intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vwmulsu.vv v16, v8, v12
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmulsu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vwmulsu.vv v8, v16, v20, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32(
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -990,3 +1170,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
i32);
define <vscale x 1 x i64> @intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
; CHECK-NEXT: vwmulsu.vx v25, v8, a0
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv1i64_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
; CHECK-NEXT: vwmulsu.vx v8, v9, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
; CHECK-NEXT: vwmulsu.vx v26, v8, a0
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv2i64_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
; CHECK-NEXT: vwmulsu.vx v8, v10, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32(
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
; CHECK-NEXT: vwmulsu.vx v28, v8, a0
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv4i64_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
; CHECK-NEXT: vwmulsu.vx v8, v12, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32(
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
i32);
define <vscale x 8 x i64> @intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
; CHECK-NEXT: vwmulsu.vx v16, v8, a0
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmulsu_mask_vx_nxv8i64_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
; CHECK-NEXT: vwmulsu.vx v8, v16, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32(
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}

@@ -496,6 +496,186 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
define <vscale x 1 x i64> @intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vwmulu.vv v25, v8, v9
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vwmulu.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
define <vscale x 2 x i64> @intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vwmulu.vv v26, v8, v9
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vwmulu.vv v8, v10, v11, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32(
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
define <vscale x 4 x i64> @intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vwmulu.vv v28, v8, v10
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vwmulu.vv v8, v12, v14, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32(
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
define <vscale x 8 x i64> @intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vwmulu.vv v16, v8, v12
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmulu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu
; CHECK-NEXT: vwmulu.vv v8, v16, v20, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32(
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -990,3 +1170,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
i32);
define <vscale x 1 x i64> @intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
; CHECK-NEXT: vwmulu.vx v25, v8, a0
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32(
<vscale x 1 x i32> %0,
i32 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv1i64_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
; CHECK-NEXT: vwmulu.vx v8, v9, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
; CHECK-NEXT: vwmulu.vx v26, v8, a0
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32(
<vscale x 2 x i32> %0,
i32 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv2i64_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
; CHECK-NEXT: vwmulu.vx v8, v10, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32(
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
; CHECK-NEXT: vwmulu.vx v28, v8, a0
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32(
<vscale x 4 x i32> %0,
i32 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv4i64_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
; CHECK-NEXT: vwmulu.vx v8, v12, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32(
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
i32);
define <vscale x 8 x i64> @intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
; CHECK-NEXT: vwmulu.vx v16, v8, a0
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32(
<vscale x 8 x i32> %0,
i32 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwmulu_mask_vx_nxv8i64_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
; CHECK-NEXT: vwmulu.vx v8, v16, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32(
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}

@@ -598,3 +598,233 @@ entry:
ret <vscale x 2 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32(
<vscale x 1 x i64>,
<vscale x 2 x i32>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32(
<vscale x 1 x i64> %0,
<vscale x 2 x i32> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.nxv1i64(
<vscale x 1 x i64>,
<vscale x 2 x i32>,
<vscale x 1 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 2 x i32> %1,
<vscale x 1 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32(
<vscale x 1 x i64>,
<vscale x 4 x i32>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vwredsum.vs v8, v10, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32(
<vscale x 1 x i64> %0,
<vscale x 4 x i32> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.nxv1i64(
<vscale x 1 x i64>,
<vscale x 4 x i32>,
<vscale x 1 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vwredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 4 x i32> %1,
<vscale x 1 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32(
<vscale x 1 x i64>,
<vscale x 8 x i32>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vwredsum.vs v8, v12, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32(
<vscale x 1 x i64> %0,
<vscale x 8 x i32> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.nxv1i64(
<vscale x 1 x i64>,
<vscale x 8 x i32>,
<vscale x 1 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vwredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 8 x i32> %1,
<vscale x 1 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32(
<vscale x 1 x i64>,
<vscale x 16 x i32>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu
; CHECK-NEXT: vwredsum.vs v8, v16, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32(
<vscale x 1 x i64> %0,
<vscale x 16 x i32> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.nxv1i64(
<vscale x 1 x i64>,
<vscale x 16 x i32>,
<vscale x 1 x i64>,
<vscale x 16 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu
; CHECK-NEXT: vwredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 16 x i32> %1,
<vscale x 1 x i64> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}

@@ -598,3 +598,233 @@ entry:
ret <vscale x 2 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32(
<vscale x 1 x i64>,
<vscale x 2 x i32>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32(
<vscale x 1 x i64> %0,
<vscale x 2 x i32> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.nxv1i64(
<vscale x 1 x i64>,
<vscale x 2 x i32>,
<vscale x 1 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 2 x i32> %1,
<vscale x 1 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32(
<vscale x 1 x i64>,
<vscale x 4 x i32>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vwredsumu.vs v8, v10, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32(
<vscale x 1 x i64> %0,
<vscale x 4 x i32> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.nxv1i64(
<vscale x 1 x i64>,
<vscale x 4 x i32>,
<vscale x 1 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vwredsumu.vs v8, v10, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 4 x i32> %1,
<vscale x 1 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32(
<vscale x 1 x i64>,
<vscale x 8 x i32>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vwredsumu.vs v8, v12, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32(
<vscale x 1 x i64> %0,
<vscale x 8 x i32> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.nxv1i64(
<vscale x 1 x i64>,
<vscale x 8 x i32>,
<vscale x 1 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vwredsumu.vs v8, v12, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 8 x i32> %1,
<vscale x 1 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32(
<vscale x 1 x i64>,
<vscale x 16 x i32>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu
; CHECK-NEXT: vwredsumu.vs v8, v16, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32(
<vscale x 1 x i64> %0,
<vscale x 16 x i32> %1,
<vscale x 1 x i64> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.nxv1i64(
<vscale x 1 x i64>,
<vscale x 16 x i32>,
<vscale x 1 x i64>,
<vscale x 16 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu
; CHECK-NEXT: vwredsumu.vs v8, v16, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 16 x i32> %1,
<vscale x 1 x i64> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}

@@ -498,6 +498,187 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
i32);
define <vscale x 1 x i64> @intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT: vwsub.wv v25, v8, v9
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu
; CHECK-NEXT: vwsub.wv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
i32);
define <vscale x 2 x i64> @intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT: vwsub.wv v26, v8, v10
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32(
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu
; CHECK-NEXT: vwsub.wv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
i32);
define <vscale x 4 x i64> @intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT: vwsub.wv v28, v8, v12
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32(
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu
; CHECK-NEXT: vwsub.wv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
i32);
define <vscale x 8 x i64> @intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT: vwsub.wv v24, v8, v16
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32(
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl4re32.v v28, (a0)
; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu
; CHECK-NEXT: vwsub.wv v8, v16, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8(
<vscale x 1 x i16>,
i8,
@@ -992,3 +1173,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32(
<vscale x 1 x i64>,
i32,
i32);
define <vscale x 1 x i64> @intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
; CHECK-NEXT: vwsub.wx v25, v8, a0
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32(
<vscale x 1 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
; CHECK-NEXT: vwsub.wx v8, v9, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32(
<vscale x 2 x i64>,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
; CHECK-NEXT: vwsub.wx v26, v8, a0
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32(
<vscale x 2 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.i32(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
; CHECK-NEXT: vwsub.wx v8, v10, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.i32(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32(
<vscale x 4 x i64>,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
; CHECK-NEXT: vwsub.wx v28, v8, a0
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32(
<vscale x 4 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.i32(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
; CHECK-NEXT: vwsub.wx v8, v12, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.i32(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32(
<vscale x 8 x i64>,
i32,
i32);
define <vscale x 8 x i64> @intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
; CHECK-NEXT: vwsub.wx v16, v8, a0
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32(
<vscale x 8 x i64> %0,
i32 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.i32(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
; CHECK-NEXT: vwsub.wx v8, v16, a0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.i32(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}

@@ -796,6 +796,183 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
define <vscale x 1 x i64> @intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vxor.vv v8, v8, v9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vxor.vv v8, v9, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
define <vscale x 2 x i64> @intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vxor.vv v8, v8, v10
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vxor.vv v8, v10, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
define <vscale x 4 x i64> @intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vxor.vv v8, v8, v12
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vxor.vv v8, v12, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i32);
define <vscale x 8 x i64> @intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vxor.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
@@ -1588,6 +1765,252 @@ entry:
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
i32);
define <vscale x 1 x i64> @intrinsic_vxor_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vxor.vv v8, v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vmv.v.x v26, a0
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu
; CHECK-NEXT: vxor.vv v8, v9, v25, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
i32);
define <vscale x 2 x i64> @intrinsic_vxor_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vxor.vv v8, v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v26, v26, a1
; CHECK-NEXT: vmv.v.x v28, a0
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vor.vv v26, v28, v26
; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu
; CHECK-NEXT: vxor.vv v8, v10, v26, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
i32);
define <vscale x 4 x i64> @intrinsic_vxor_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vsll.vx v12, v12, a1
; CHECK-NEXT: vsrl.vx v12, v12, a1
; CHECK-NEXT: vor.vv v28, v12, v28
; CHECK-NEXT: vxor.vv v8, v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
; CHECK-NEXT: vmv.v.x v28, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v28, v28, a1
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vsrl.vx v16, v16, a1
; CHECK-NEXT: vor.vv v28, v16, v28
; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu
; CHECK-NEXT: vxor.vv v8, v12, v28, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
i32);
define <vscale x 8 x i64> @intrinsic_vxor_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vxor_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a2, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v16, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v16, v16, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v16, v24, v16
; CHECK-NEXT: vxor.vv v8, v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 %1,
i32 %2)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrrs a3, vlenb, zero
; CHECK-NEXT: sub sp, sp, a3
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
; CHECK-NEXT: vmv.v.x v24, a1
; CHECK-NEXT: addi a1, zero, 32
; CHECK-NEXT: vsll.vx v0, v24, a1
; CHECK-NEXT: vmv.v.x v24, a0
; CHECK-NEXT: vsll.vx v24, v24, a1
; CHECK-NEXT: vsrl.vx v24, v24, a1
; CHECK-NEXT: vor.vv v24, v24, v0
; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl1re8.v v0, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t
; CHECK-NEXT: csrrs a0, vlenb, zero
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i64> %a
}
define <vscale x 1 x i8> @intrinsic_vxor_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
@@ -2163,3 +2586,131 @@ entry:
ret <vscale x 16 x i32> %a
}
define <vscale x 1 x i64> @intrinsic_vxor_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vxor.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 1 x i64> %a
}
define <vscale x 1 x i64> @intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vxor.vi v8, v9, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
i32 %3)
ret <vscale x 1 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vxor_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vxor.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64(
<vscale x 2 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 2 x i64> %a
}
define <vscale x 2 x i64> @intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vxor.vi v8, v10, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
i32 %3)
ret <vscale x 2 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vxor_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vxor.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64(
<vscale x 4 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 4 x i64> %a
}
define <vscale x 4 x i64> @intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vxor.vi v8, v12, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
i32 %3)
ret <vscale x 4 x i64> %a
}
define <vscale x 8 x i64> @intrinsic_vxor_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vxor_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vxor.vi v8, v8, 9
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64(
<vscale x 8 x i64> %0,
i64 9,
i32 %1)
ret <vscale x 8 x i64> %a
}
define <vscale x 8 x i64> @intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vxor_mask_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
; CHECK-NEXT: vxor.vi v8, v16, 9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 9,
<vscale x 8 x i1> %2,
i32 %3)
ret <vscale x 8 x i64> %a
}

@@ -1,6 +1,334 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
<vscale x 1 x i8>,
i32);
define <vscale x 1 x i64> @intrinsic_vzext_vf8_nxv1i64(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vzext.vf8 v25, v8
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
<vscale x 1 x i8> %0,
i32 %1)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
<vscale x 1 x i64>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vzext_mask_vf8_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vzext.vf8 v8, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
<vscale x 1 x i64> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %0,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8(
<vscale x 2 x i8>,
i32);
define <vscale x 2 x i64> @intrinsic_vzext_vf8_nxv2i64(<vscale x 2 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vzext.vf8 v26, v8
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8(
<vscale x 2 x i8> %0,
i32 %1)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8(
<vscale x 2 x i64>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vzext_mask_vf8_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vzext.vf8 v8, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8(
<vscale x 2 x i64> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %0,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8(
<vscale x 4 x i8>,
i32);
define <vscale x 4 x i64> @intrinsic_vzext_vf8_nxv4i64(<vscale x 4 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vzext.vf8 v28, v8
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8(
<vscale x 4 x i8> %0,
i32 %1)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8(
<vscale x 4 x i64>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vzext_mask_vf8_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vzext.vf8 v8, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8(
<vscale x 4 x i64> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %0,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8(
<vscale x 8 x i8>,
i32);
define <vscale x 8 x i64> @intrinsic_vzext_vf8_nxv8i64(<vscale x 8 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vzext.vf8 v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8(
<vscale x 8 x i8> %0,
i32 %1)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8(
<vscale x 8 x i64>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vzext_mask_vf8_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
; CHECK-NEXT: vzext.vf8 v8, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8(
<vscale x 8 x i64> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %0,
i32 %3)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16(
<vscale x 1 x i16>,
i32);
define <vscale x 1 x i64> @intrinsic_vzext_vf4_nxv1i64(<vscale x 1 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf4_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vzext.vf4 v25, v8
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16(
<vscale x 1 x i16> %0,
i32 %1)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
<vscale x 1 x i64>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vzext_mask_vf4_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vzext.vf4 v8, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
<vscale x 1 x i64> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %0,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16(
<vscale x 2 x i16>,
i32);
define <vscale x 2 x i64> @intrinsic_vzext_vf4_nxv2i64(<vscale x 2 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf4_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vzext.vf4 v26, v8
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16(
<vscale x 2 x i16> %0,
i32 %1)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16(
<vscale x 2 x i64>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vzext_mask_vf4_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vzext.vf4 v8, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16(
<vscale x 2 x i64> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %0,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16(
<vscale x 4 x i16>,
i32);
define <vscale x 4 x i64> @intrinsic_vzext_vf4_nxv4i64(<vscale x 4 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf4_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vzext.vf4 v28, v8
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16(
<vscale x 4 x i16> %0,
i32 %1)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16(
<vscale x 4 x i64>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vzext_mask_vf4_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vzext.vf4 v8, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16(
<vscale x 4 x i64> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %0,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16(
<vscale x 8 x i16>,
i32);
define <vscale x 8 x i64> @intrinsic_vzext_vf4_nxv8i64(<vscale x 8 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf4_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vzext.vf4 v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16(
<vscale x 8 x i16> %0,
i32 %1)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16(
<vscale x 8 x i64>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vzext_mask_vf4_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
; CHECK-NEXT: vzext.vf4 v8, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16(
<vscale x 8 x i64> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %0,
i32 %3)
ret <vscale x 8 x i64> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8(
<vscale x 1 x i8>,
i32);
@ -206,6 +534,170 @@ entry:
ret <vscale x 16 x i32> %a
}
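; Descriptive note (editorial): from here the coverage switches to vzext.vf2,
; widening i32 element vectors to i64 results (nxv1i64 through nxv8i64), again
; with paired unmasked and masked test functions.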
declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32(
<vscale x 1 x i32>,
i32);
define <vscale x 1 x i64> @intrinsic_vzext_vf2_nxv1i64(<vscale x 1 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT: vzext.vf2 v25, v8
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32(
<vscale x 1 x i32> %0,
i32 %1)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i64> @intrinsic_vzext_mask_vf2_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu
; CHECK-NEXT: vzext.vf2 v8, v9, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
<vscale x 1 x i64> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %0,
i32 %3)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32(
<vscale x 2 x i32>,
i32);
define <vscale x 2 x i64> @intrinsic_vzext_vf2_nxv2i64(<vscale x 2 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT: vzext.vf2 v26, v8
; CHECK-NEXT: vmv2r.v v8, v26
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32(
<vscale x 2 x i32> %0,
i32 %1)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i64> @intrinsic_vzext_mask_vf2_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
; CHECK-NEXT: vzext.vf2 v8, v10, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32(
<vscale x 2 x i64> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %0,
i32 %3)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32(
<vscale x 4 x i32>,
i32);
define <vscale x 4 x i64> @intrinsic_vzext_vf2_nxv4i64(<vscale x 4 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT: vzext.vf2 v28, v8
; CHECK-NEXT: vmv4r.v v8, v28
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32(
<vscale x 4 x i32> %0,
i32 %1)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i64> @intrinsic_vzext_mask_vf2_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
; CHECK-NEXT: vzext.vf2 v8, v12, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32(
<vscale x 4 x i64> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %0,
i32 %3)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32(
<vscale x 8 x i32>,
i32);
define <vscale x 8 x i64> @intrinsic_vzext_vf2_nxv8i64(<vscale x 8 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vzext_vf2_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT: vzext.vf2 v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32(
<vscale x 8 x i32> %0,
i32 %1)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i64> @intrinsic_vzext_mask_vf2_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, a0, e64,m8,tu,mu
; CHECK-NEXT: vzext.vf2 v8, v16, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32(
<vscale x 8 x i64> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %0,
i32 %3)
ret <vscale x 8 x i64> %a
}
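; Descriptive note (editorial): the declarations that follow belong to the
; pre-existing vzext.vf2 tests for i32 results widened from i16 sources.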
declare <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16(
<vscale x 1 x i16>,
i32);