[RISCV][Clang] Add all RVV Reduction intrinsic functions.

Authored-by: Roger Ferrer Ibanez <rofirrim@gmail.com>
Co-Authored-by: Zakk Chen <zakk.chen@sifive.com>

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D99964
Zakk Chen 2021-04-06 07:57:41 -07:00
parent 07c3854a75
commit c680b0dabf
24 changed files with 21760 additions and 0 deletions
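Every reduction intrinsic added here uses the (dest, vector, scalar, vl) operand shape that the tests below exercise, with the scalar accumulator held in element 0 of an LMUL=1 register. As a rough usage sketch — not part of the patch, assuming n > 0, and relying on the companion vsetvl/vle/vfmv intrinsics already provided by <riscv_vector.h> (their exact names are not spelled out in this patch) — a strip-mined float sum with the new overloaded vfredsum could look like:

#include <riscv_vector.h>
#include <stddef.h>

// Sketch only: sum a float buffer with the new vfredsum intrinsic.
// Element 0 of 'acc' carries the running scalar accumulator.
static float sum_f32(const float *p, size_t n) {
  size_t vl = vsetvl_e32m1(n);
  vfloat32m1_t acc = vfmv_v_f_f32m1(0.0f, vl);   // acc[0] = 0.0f
  for (size_t i = 0; i < n; i += vl) {
    vl = vsetvl_e32m1(n - i);
    vfloat32m1_t v = vle32_v_f32m1(p + i, vl);
    // result[0] = acc[0] + sum(v[0..vl-1]); acc doubles as dest and scalar.
    acc = vfredsum(acc, v, acc, vl);
  }
  return vfmv_f_s_f32m1_f32(acc);                // extract element 0
}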


@@ -404,6 +404,29 @@ class RVVConvToNarrowingSignedBuiltin<string mangled_name>
class RVVConvToNarrowingUnsignedBuiltin<string mangled_name>
    : RVVConvBuiltin<"Uv", "UvFw", "si", mangled_name>;
let HasMaskedOffOperand = false in {
multiclass RVVSignedReductionBuiltin {
  defm "" : RVVOutOp1BuiltinSet<NAME, "csil",
                                [["vs", "vSv", "SvSvvSv"]]>;
}
multiclass RVVUnsignedReductionBuiltin {
  defm "" : RVVOutOp1BuiltinSet<NAME, "csil",
                                [["vs", "UvUSv", "USvUSvUvUSv"]]>;
}
multiclass RVVFloatingReductionBuiltin {
  defm "" : RVVOutOp1BuiltinSet<NAME, "fd",
                                [["vs", "vSv", "SvSvvSv"]]>;
}
multiclass RVVFloatingWidenReductionBuiltin {
  defm "" : RVVOutOp1BuiltinSet<NAME, "f",
                                [["vs", "vSw", "SwSwvSw"]]>;
}
}
multiclass RVVIntReductionBuiltinSet
    : RVVSignedReductionBuiltin,
      RVVUnsignedReductionBuiltin;
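Reading the prototype string "SvSvvSv": the 'S'-prefixed types end up as the LMUL=1 accumulator/result type (compare the f32m1 dst/scalar in the tests below), the bare 'v' is the full-LMUL source vector, and the trailing size_t vl is appended by the framework. A hedged sketch of the kind of declaration one instance of these multiclasses is expected to expand to — the concrete name below merely follows the usual <op>_vs_<src>_<dst> naming scheme and is illustrative, not copied from the generated header:

// Illustrative expansion of RVVSignedReductionBuiltin for vredsum,
// SEW=32 with an LMUL=2 source vector and an LMUL=1 accumulator.
vint32m1_t vredsum_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
                                  vint32m1_t scalar, size_t vl);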
// For widening operations, which have a different mangled name.
multiclass RVVWidenBuiltinSet<string intrinsic_name, string type_range,
                              list<list<string>> suffixes_prototypes> {
@@ -961,3 +984,33 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
  def vfncvt_f_f_w : RVVConvBuiltin<"v", "vw", "f", "vfncvt_f">;
  def vfncvt_rod_f_f_w : RVVConvBuiltin<"v", "vw", "f", "vfncvt_rod_f">;
}
// 15. Vector Reduction Operations
// 15.1. Vector Single-Width Integer Reduction Instructions
defm vredsum : RVVIntReductionBuiltinSet;
defm vredmaxu : RVVUnsignedReductionBuiltin;
defm vredmax : RVVSignedReductionBuiltin;
defm vredminu : RVVUnsignedReductionBuiltin;
defm vredmin : RVVSignedReductionBuiltin;
defm vredand : RVVIntReductionBuiltinSet;
defm vredor : RVVIntReductionBuiltinSet;
defm vredxor : RVVIntReductionBuiltinSet;
// 15.2. Vector Widening Integer Reduction Instructions
// Vector Widening Integer Reduction Operations
let HasMaskedOffOperand = false in {
defm vwredsum : RVVOutOp1BuiltinSet<"vwredsum", "csi",
                                    [["vs", "vSw", "SwSwvSw"]]>;
defm vwredsumu : RVVOutOp1BuiltinSet<"vwredsumu", "csi",
                                     [["vs", "UvUSw", "USwUSwUvUSw"]]>;
}
// 15.3. Vector Single-Width Floating-Point Reduction Instructions
defm vfredmax : RVVFloatingReductionBuiltin;
defm vfredmin : RVVFloatingReductionBuiltin;
defm vfredsum : RVVFloatingReductionBuiltin;
defm vfredosum : RVVFloatingReductionBuiltin;
// 15.4. Vector Widening Floating-Point Reduction Instructions
defm vfwredsum : RVVFloatingWidenReductionBuiltin;
defm vfwredosum : RVVFloatingWidenReductionBuiltin;
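The widening forms use the 'w' (double-SEW) type transformer, so the accumulator and result elements are twice as wide as the source vector's. Sketches of the shapes these records are expected to yield — again, the concrete names follow the usual scheme and are assumptions rather than text taken from the patch:

// Integer widening reduction: i16 elements summed into an i32 LMUL=1 accumulator.
vint32m1_t vwredsum_vs_i16m1_i32m1(vint32m1_t dst, vint16m1_t vector,
                                   vint32m1_t scalar, size_t vl);
// Floating-point widening reduction: f32 elements summed into an f64 accumulator.
vfloat64m1_t vfwredsum_vs_f32m1_f64m1(vfloat64m1_t dst, vfloat32m1_t vector,
                                      vfloat64m1_t scalar, size_t vl);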


@@ -0,0 +1,291 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
// ASM-NOT: warning
#include <riscv_vector.h>
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32mf2_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32m1_t dst,
vfloat32mf2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmax(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m1_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmax(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m2_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmax(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m4_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmax(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m8_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmax(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m1_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmax(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m2_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmax(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m4_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmax(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m8_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmax(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
vfloat32mf2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmax(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m1_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
vfloat32m1_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmax(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m2_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
vfloat32m2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmax(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m4_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
vfloat32m4_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmax(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m8_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
vfloat32m8_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmax(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m1_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
vfloat64m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmax(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m2_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
vfloat64m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmax(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m4_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
vfloat64m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmax(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m8_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
vfloat64m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmax(mask, dst, vector, scalar, vl);
}


@@ -0,0 +1,291 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
// ASM-NOT: warning
#include <riscv_vector.h>
// CHECK-RV32-LABEL: @test_vfredmin_vs_f32mf2_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32m1_t dst,
vfloat32mf2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmin(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m1_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmin(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m2_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmin(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m4_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmin(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m8_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmin(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m1_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmin(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m2_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmin(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m4_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmin(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m8_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmin(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
vfloat32mf2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmin(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m1_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
vfloat32m1_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmin(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m2_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
vfloat32m2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmin(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m4_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
vfloat32m4_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmin(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m8_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
vfloat32m8_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmin(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m1_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
vfloat64m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmin(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m2_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
vfloat64m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmin(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m4_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
vfloat64m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmin(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m8_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
vfloat64m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmin(mask, dst, vector, scalar, vl);
}


@@ -0,0 +1,579 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
// ASM-NOT: warning
#include <riscv_vector.h>
// CHECK-RV32-LABEL: @test_vfredsum_vs_f32mf2_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1(vfloat32m1_t dst,
vfloat32mf2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredsum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m1_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredsum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredsum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m2_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredsum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredsum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m4_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredsum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredsum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m8_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredsum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredsum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m1_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredsum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredsum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m2_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredsum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredsum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m4_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredsum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredsum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m8_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredsum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredsum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
vfloat32mf2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m1_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredsum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
vfloat32m1_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m2_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredsum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
vfloat32m2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m4_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredsum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
vfloat32m4_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m8_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredsum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
vfloat32m8_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m1_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredsum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
vfloat64m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m2_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredsum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
vfloat64m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m4_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredsum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
vfloat64m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m8_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
vfloat64m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f32mf2_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32m1_t dst,
vfloat32mf2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredosum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m1_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t dst,
vfloat32m1_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredosum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m2_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m1_t dst,
vfloat32m2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredosum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m4_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m1_t dst,
vfloat32m4_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredosum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m8_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m1_t dst,
vfloat32m8_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredosum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m1_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t dst,
vfloat64m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredosum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m2_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m1_t dst,
vfloat64m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredosum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m4_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m1_t dst,
vfloat64m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredosum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m8_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m1_t dst,
vfloat64m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredosum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
vfloat32mf2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredosum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m1_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
vfloat32m1_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredosum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m2_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
vfloat32m2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredosum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m4_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
vfloat32m4_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredosum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m8_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
vfloat32m8_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredosum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m1_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
vfloat64m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredosum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m2_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
vfloat64m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredosum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m4_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
vfloat64m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredosum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m8_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
vfloat64m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredosum(mask, dst, vector, scalar, vl);
}
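The overloaded vfredsum/vfredosum forms exercised above reduce a whole vector group into element 0 of an LMUL=1 result, with the scalar operand seeding the accumulation and the dst operand supplying tail elements. A minimal strip-mined usage sketch follows; it is illustrative only and not part of this patch, and it assumes the companion intrinsics vsetvl_e32m1, vsetvl_e32m8, vle32_v_f32m8, vfmv_v_f_f32m1 and vfmv_f_s_f32m1_f32 are available under these names in <riscv_vector.h>.

#include <riscv_vector.h>
#include <stddef.h>

// Ordered sum of n floats using the overloaded vfredosum tested above.
// (Helper intrinsic names below are assumptions, not part of this patch.)
float ordered_sum_f32(const float *buf, size_t n) {
  vfloat32m1_t acc = vfmv_v_f_f32m1(0.0f, vsetvl_e32m1(1)); // acc[0] = 0.0f
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m8(n - i);             // elements handled this trip
    vfloat32m8_t v = vle32_v_f32m8(buf + i, vl); // strip-mined load
    // result[0] = acc[0] + v[0] + v[1] + ... + v[vl-1], in element order.
    acc = vfredosum(acc, v, acc, vl);
    i += vl;
  }
  return vfmv_f_s_f32m1_f32(acc);                // extract element 0
}

Swapping vfredosum for vfredsum gives the unordered reduction with the same overloaded argument list, as the masked tests earlier in this file show.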

View File

@@ -0,0 +1,331 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
// ASM-NOT: warning
#include <riscv_vector.h>
// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32mf2_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1(vfloat64m1_t dst,
vfloat32mf2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredsum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m1_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1(vfloat64m1_t dst,
vfloat32m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredsum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m2_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1(vfloat64m1_t dst,
vfloat32m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredsum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m4_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1(vfloat64m1_t dst,
vfloat32m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredsum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m8_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1(vfloat64m1_t dst,
vfloat32m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredsum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
vfloat32mf2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
vfloat32m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
vfloat32m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
vfloat32m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
vfloat32m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredsum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32mf2_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat64m1_t dst,
vfloat32mf2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredosum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m1_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat64m1_t dst,
vfloat32m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredosum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m2_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat64m1_t dst,
vfloat32m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredosum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m4_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat64m1_t dst,
vfloat32m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredosum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m8_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat64m1_t dst,
vfloat32m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredosum(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
vfloat32mf2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredosum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
vfloat32m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredosum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
vfloat32m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredosum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
vfloat32m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredosum(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
vfloat32m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredosum(mask, dst, vector, scalar, vl);
}
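For the widening forms above, the accumulator element type is twice the source SEW, which makes them a natural fit for accumulating a single-precision sum in double precision. The sketch below is a hedged illustration rather than part of the patch; vsetvl_e32m8, vle32_v_f32m8, vfmv_v_f_f64m1 and vfmv_f_s_f64m1_f64 are presumed helper names from <riscv_vector.h>.

#include <riscv_vector.h>
#include <stddef.h>

// Unordered widening sum: float inputs, double accumulator, via the
// overloaded vfwredsum exercised in the tests above.
// (Helper intrinsic names below are assumptions, not part of this patch.)
double widening_sum_f32(const float *buf, size_t n) {
  vfloat64m1_t acc = vfmv_v_f_f64m1(0.0, 1);     // acc[0] = 0.0
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m8(n - i);
    vfloat32m8_t v = vle32_v_f32m8(buf + i, vl);
    // result[0] = acc[0] + sum of v[0..vl-1] widened to f64 (order unspecified).
    acc = vfwredsum(acc, v, acc, vl);
    i += vl;
  }
  return vfmv_f_s_f64m1_f64(acc);                // extract element 0
}

Using vfwredosum instead keeps the same argument list but performs the addition in element order.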

Seven more file diffs suppressed because they are too large.

View File

@@ -0,0 +1,291 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
// ASM-NOT: warning
#include <riscv_vector.h>
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32mf2_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32m1_t dst,
vfloat32mf2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmax_vs_f32mf2_f32m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m1_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmax_vs_f32m1_f32m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m2_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmax_vs_f32m2_f32m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m4_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmax_vs_f32m4_f32m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m8_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmax_vs_f32m8_f32m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m1_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmax_vs_f64m1_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m2_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmax_vs_f64m2_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m4_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmax_vs_f64m4_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m8_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmax_vs_f64m8_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
vfloat32mf2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmax_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m1_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
vfloat32m1_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmax_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m2_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
vfloat32m2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmax_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m4_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
vfloat32m4_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmax_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m8_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
vfloat32m8_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmax_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m1_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
vfloat64m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmax_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m2_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
vfloat64m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmax_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m4_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
vfloat64m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmax_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m8_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
vfloat64m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmax_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
}
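The explicitly mangled names used in this file (vfredmax_vs_<src>_<dst> and their _m variants) pin down both the source vector type and the LMUL=1 destination type. A hedged sketch of a strip-mined maximum over the f32m8 form follows; the vsetvl_e32m8, vle32_v_f32m8, vfmv_v_f_f32m1 and vfmv_f_s_f32m1_f32 names are assumptions about <riscv_vector.h>, so treat this as illustrative only.

#include <riscv_vector.h>
#include <stddef.h>

// Running maximum of n floats (requires n >= 1) using the non-overloaded
// vfredmax_vs_f32m8_f32m1 form tested above.
// (Helper intrinsic names below are assumptions, not part of this patch.)
float max_f32(const float *buf, size_t n) {
  vfloat32m1_t acc = vfmv_v_f_f32m1(buf[0], 1);  // seed acc[0] with buf[0]
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m8(n - i);
    vfloat32m8_t v = vle32_v_f32m8(buf + i, vl);
    // result[0] = max(acc[0], v[0], ..., v[vl-1])
    acc = vfredmax_vs_f32m8_f32m1(acc, v, acc, vl);
    i += vl;
  }
  return vfmv_f_s_f32m1_f32(acc);                // extract element 0
}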

View File

@@ -0,0 +1,291 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
// ASM-NOT: warning
#include <riscv_vector.h>
// CHECK-RV32-LABEL: @test_vfredmin_vs_f32mf2_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32m1_t dst,
vfloat32mf2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmin_vs_f32mf2_f32m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m1_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmin_vs_f32m1_f32m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m2_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmin_vs_f32m2_f32m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m4_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmin_vs_f32m4_f32m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m8_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmin_vs_f32m8_f32m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m1_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmin_vs_f64m1_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m2_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmin_vs_f64m2_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m4_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmin_vs_f64m4_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m8_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmin_vs_f64m8_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
vfloat32mf2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmin_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m1_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
vfloat32m1_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmin_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m2_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
vfloat32m2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmin_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m4_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
vfloat32m4_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmin_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m8_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
vfloat32m8_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredmin_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m1_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
vfloat64m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmin_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m2_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
vfloat64m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmin_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m4_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
vfloat64m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmin_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m8_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
vfloat64m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredmin_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
}


@@ -0,0 +1,579 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
// ASM-NOT: warning
#include <riscv_vector.h>
// CHECK-RV32-LABEL: @test_vfredsum_vs_f32mf2_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1(vfloat32m1_t dst,
vfloat32mf2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredsum_vs_f32mf2_f32m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m1_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredsum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredsum_vs_f32m1_f32m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m2_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredsum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredsum_vs_f32m2_f32m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m4_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredsum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredsum_vs_f32m4_f32m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m8_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredsum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredsum_vs_f32m8_f32m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m1_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredsum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredsum_vs_f64m1_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m2_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredsum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredsum_vs_f64m2_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m4_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredsum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredsum_vs_f64m4_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m8_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredsum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredsum_vs_f64m8_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
vfloat32mf2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredsum_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m1_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredsum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
vfloat32m1_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredsum_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m2_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredsum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
vfloat32m2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredsum_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m4_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredsum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
vfloat32m4_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredsum_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m8_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredsum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
vfloat32m8_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredsum_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m1_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredsum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
vfloat64m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredsum_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m2_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredsum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
vfloat64m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredsum_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m4_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredsum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
vfloat64m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredsum_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m8_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
vfloat64m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredsum_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f32mf2_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32m1_t dst,
vfloat32mf2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredosum_vs_f32mf2_f32m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m1_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t dst,
vfloat32m1_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredosum_vs_f32m1_f32m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m2_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m1_t dst,
vfloat32m2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredosum_vs_f32m2_f32m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m4_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m1_t dst,
vfloat32m4_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredosum_vs_f32m4_f32m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m8_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m1_t dst,
vfloat32m8_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredosum_vs_f32m8_f32m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m1_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t dst,
vfloat64m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredosum_vs_f64m1_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m2_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m1_t dst,
vfloat64m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredosum_vs_f64m2_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m4_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m1_t dst,
vfloat64m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredosum_vs_f64m4_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m8_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m1_t dst,
vfloat64m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredosum_vs_f64m8_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
vfloat32mf2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredosum_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m1_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
vfloat32m1_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredosum_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m2_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
vfloat32m2_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredosum_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m4_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
vfloat32m4_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredosum_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m8_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
vfloat32m8_t vector,
vfloat32m1_t scalar, size_t vl) {
return vfredosum_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m1_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
vfloat64m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredosum_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m2_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
vfloat64m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredosum_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m4_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
vfloat64m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredosum_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m8_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
vfloat64m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfredosum_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
}


@@ -0,0 +1,171 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
// ASM-NOT: warning
#include <riscv_vector.h>
// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32mf2_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat64m1_t dst,
vfloat32mf2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredosum_vs_f32mf2_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m1_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat64m1_t dst,
vfloat32m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredosum_vs_f32m1_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m2_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat64m1_t dst,
vfloat32m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredosum_vs_f32m2_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m4_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat64m1_t dst,
vfloat32m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredosum_vs_f32m4_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m8_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat64m1_t dst,
vfloat32m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredosum_vs_f32m8_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
vfloat32mf2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredosum_vs_f32mf2_f64m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
vfloat32m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredosum_vs_f32m1_f64m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
vfloat32m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredosum_vs_f32m2_f64m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
vfloat32m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredosum_vs_f32m4_f64m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
vfloat32m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredosum_vs_f32m8_f64m1_m(mask, dst, vector, scalar, vl);
}


@ -0,0 +1,171 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
// ASM-NOT: warning
#include <riscv_vector.h>
// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32mf2_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1(vfloat64m1_t dst,
vfloat32mf2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredsum_vs_f32mf2_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m1_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1(vfloat64m1_t dst,
vfloat32m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredsum_vs_f32m1_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m2_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1(vfloat64m1_t dst,
vfloat32m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredsum_vs_f32m2_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m4_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1(vfloat64m1_t dst,
vfloat32m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredsum_vs_f32m4_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m8_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1(vfloat64m1_t dst,
vfloat32m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredsum_vs_f32m8_f64m1(dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
vfloat32mf2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredsum_vs_f32mf2_f64m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
vfloat32m1_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredsum_vs_f32m1_f64m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
vfloat32m2_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredsum_vs_f32m2_f64m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
vfloat32m4_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredsum_vs_f32m4_f64m1_m(mask, dst, vector, scalar, vl);
}
// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
vfloat32m8_t vector,
vfloat64m1_t scalar, size_t vl) {
return vfwredsum_vs_f32m8_f64m1_m(mask, dst, vector, scalar, vl);
}
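The two test files above exercise the widening floating-point reductions end to end: vfwredosum_vs_* is the ordered sum and vfwredsum_vs_* the unordered one, each folding a float vector into element 0 of a double result seeded from the scalar operand. The sketch below shows how such an intrinsic might be used in practice; it is only an illustration, and the helper intrinsics it relies on (vsetvl_e32m1, vle32_v_f32m1, vfmv_v_f_f64m1, vfmv_f_s_f64m1_f64) come from the wider RVV intrinsics spec and earlier patches, not from this change, so their exact spellings are an assumption.

#include <riscv_vector.h>

// Sketch only: ordered widening sum of n floats into a double.
// Helper intrinsic names outside the vfwred* family are assumed.
double sum_f32_to_f64_ordered(const float *x, size_t n) {
  vfloat64m1_t acc = vfmv_v_f_f64m1(0.0, 1);     // accumulator lives in element 0
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m1(n - i);             // elements handled this pass
    vfloat32m1_t v = vle32_v_f32m1(&x[i], vl);   // load one strip of floats
    // result[0] = acc[0] + ordered sum of v[0..vl-1]; acc serves as both
    // the dst and the scalar operand, exactly as in the tests above.
    acc = vfwredosum_vs_f32m1_f64m1(acc, v, acc, vl);
    i += vl;
  }
  return vfmv_f_s_f64m1_f64(acc);                // read the scalar back out
}

Swapping in vfwredsum_vs_f32m1_f64m1 would give the unordered variant, which is free to reassociate the additions.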

Seven more file diffs in this commit are suppressed because they are too large.