// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
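
// `vredsum` performs an unordered integer sum reduction. Informally (a
// summary of the RVV `vredsum.vs` semantics, not normative wording):
//   result[0] = scalar[0] + (vector[0] + ... + vector[vl-1])
// with the remaining elements of the result coming from `dst`.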

// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
  return vredsum(dst, vector, scalar, vl);
}
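
// Masked (_m) variants: informally, only the elements of `vector` whose mask
// bit is set participate in the reduction; `scalar` element 0 is still
// included, and the mask type (vboolN_t) matches the SEW/LMUL ratio of
// `vector`.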

// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}

// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
  return vredsum(mask, dst, vector, scalar, vl);
}