[RISCV][Clang] Add some RVV Floating-Point intrinsic functions.
Support the following instructions, which share the same builtin class:

1. Vector Single-Width Floating-Point Subtract Instructions
2. Vector Single-Width Floating-Point Multiply/Divide Instructions
3. Vector Floating-Point MIN/MAX Instructions
4. Vector Floating-Point Sign-Injection Instructions

Reviewed By: craig.topper
Authored-by: Roger Ferrer Ibanez <rofirrim@gmail.com>
Co-Authored-by: Zakk Chen <zakk.chen@sifive.com>

Differential Revision: https://reviews.llvm.org/D99668
parent ccc624bfd4
commit 007ea0e736
@@ -323,6 +323,10 @@ multiclass RVVFloatingBinBuiltinSet
                          [["vv", "v", "vvv"],
                           ["vf", "v", "vve"]]>;

multiclass RVVFloatingBinVFBuiltinSet
    : RVVOutOp1BuiltinSet<NAME, "fd",
                          [["vf", "v", "vve"]]>;

class RVVIntExt<string intrinsic_name, string suffix, string prototype,
                string type_range>
    : RVVBuiltin<suffix, prototype, type_range> {
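The new RVVFloatingBinVFBuiltinSet mirrors RVVFloatingBinBuiltinSet but emits only the vector-scalar ("vf", prototype "vve") overload, omitting the vector-vector form; it is used below for the reversed operations vfrsub and vfrdiv, which the ISA defines only with a scalar operand. A minimal sketch of the resulting user-level call, assuming the overloaded API from <riscv_vector.h> (the helper name is hypothetical, not part of this patch):

#include <riscv_vector.h>

// Hypothetical helper: computes scalar - v[i] for the first vl elements.
// vfrsub has no vector-vector variant, so only the .vf overload exists.
vfloat64m1_t reverse_sub(double scalar, vfloat64m1_t v, size_t vl) {
  return vfrsub(v, scalar, vl); // lowers to vfrsub.vf
}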
@@ -767,3 +771,61 @@ defm vnclip : RVVSignedNShiftBuiltinSet;
// 14. Vector Floating-Point Instructions
// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
defm vfadd : RVVFloatingBinBuiltinSet;
defm vfsub : RVVFloatingBinBuiltinSet;
defm vfrsub : RVVFloatingBinVFBuiltinSet;

// 14.3. Vector Widening Floating-Point Add/Subtract Instructions
// TODO

// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
defm vfmul : RVVFloatingBinBuiltinSet;
defm vfdiv : RVVFloatingBinBuiltinSet;
defm vfrdiv : RVVFloatingBinVFBuiltinSet;

// 14.5. Vector Widening Floating-Point Multiply
// TODO

// 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
// TODO

// 14.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
// TODO

// 14.8. Vector Floating-Point Square-Root Instruction
// TODO

// 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
// TODO

// 14.10. Vector Floating-Point Reciprocal Estimate Instruction
// TODO

// 14.11. Vector Floating-Point MIN/MAX Instructions
defm vfmin : RVVFloatingBinBuiltinSet;
defm vfmax : RVVFloatingBinBuiltinSet;

// 14.12. Vector Floating-Point Sign-Injection Instructions
defm vfsgnj : RVVFloatingBinBuiltinSet;
defm vfsgnjn : RVVFloatingBinBuiltinSet;
defm vfsgnjx : RVVFloatingBinBuiltinSet;

// 14.13. Vector Floating-Point Compare Instructions
// TODO

// 14.14. Vector Floating-Point Classify Instruction
// TODO

// 14.15. Vector Floating-Point Merge Instruction
// TODO

// 14.16. Vector Floating-Point Move Instruction
// TODO

// 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
// TODO

// 14.18. Widening Floating-Point/Integer Type-Convert Instructions
// TODO

// 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
// TODO
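The two new tests below exercise vfdiv and vfmax across every element width and LMUL, in both unmasked and masked forms; the other defms above follow the same pattern. As a rough usage sketch (not part of this patch; it assumes the overloaded spellings checked by these tests), the new builtins compose like ordinary C calls:

#include <riscv_vector.h>

// Hypothetical example: clamp x to [lo, hi], then negate via sign injection.
vfloat32m1_t clamp_negate(vfloat32m1_t x, float lo, float hi, size_t vl) {
  vfloat32m1_t t = vfmax(x, lo, vl); // vfmax.vf: elementwise max with scalar
  t = vfmin(t, hi, vl);              // vfmin.vf: elementwise min with scalar
  return vfsgnjn(t, t, vl);          // vfsgnjn.vv: magnitude of t with the
                                     // negated sign of t, i.e. -t
}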
@@ -0,0 +1,551 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s

// ASM-NOT: warning
#include <riscv_vector.h>

// CHECK-RV32-LABEL: @test_vfdiv_vv_f32mf2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
                                   size_t vl) {
  return vfdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f32mf2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
  return vfdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
                                 size_t vl) {
  return vfdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
  return vfdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
                                 size_t vl) {
  return vfdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
  return vfdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
                                 size_t vl) {
  return vfdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
  return vfdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
                                 size_t vl) {
  return vfdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
  return vfdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
                                 size_t vl) {
  return vfdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
  return vfdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
                                 size_t vl) {
  return vfdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
  return vfdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
                                 size_t vl) {
  return vfdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
  return vfdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
                                 size_t vl) {
  return vfdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
  return vfdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f32mf2_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
                                     vfloat32mf2_t op1, vfloat32mf2_t op2,
                                     size_t vl) {
  return vfdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f32mf2_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
                                     vfloat32mf2_t op1, float op2, size_t vl) {
  return vfdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m1_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                   vfloat32m1_t op1, vfloat32m1_t op2,
                                   size_t vl) {
  return vfdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m1_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                   vfloat32m1_t op1, float op2, size_t vl) {
  return vfdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m2_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                   vfloat32m2_t op1, vfloat32m2_t op2,
                                   size_t vl) {
  return vfdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m2_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                   vfloat32m2_t op1, float op2, size_t vl) {
  return vfdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m4_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                   vfloat32m4_t op1, vfloat32m4_t op2,
                                   size_t vl) {
  return vfdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m4_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                   vfloat32m4_t op1, float op2, size_t vl) {
  return vfdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m8_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
                                   vfloat32m8_t op1, vfloat32m8_t op2,
                                   size_t vl) {
  return vfdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m8_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
                                   vfloat32m8_t op1, float op2, size_t vl) {
  return vfdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m1_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                   vfloat64m1_t op1, vfloat64m1_t op2,
                                   size_t vl) {
  return vfdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m1_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                   vfloat64m1_t op1, double op2, size_t vl) {
  return vfdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m2_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
                                   vfloat64m2_t op1, vfloat64m2_t op2,
                                   size_t vl) {
  return vfdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m2_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
                                   vfloat64m2_t op1, double op2, size_t vl) {
  return vfdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m4_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
                                   vfloat64m4_t op1, vfloat64m4_t op2,
                                   size_t vl) {
  return vfdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m4_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
                                   vfloat64m4_t op1, double op2, size_t vl) {
  return vfdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m8_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                   vfloat64m8_t op1, vfloat64m8_t op2,
                                   size_t vl) {
  return vfdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m8_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                   vfloat64m8_t op1, double op2, size_t vl) {
  return vfdiv(mask, maskedoff, op1, op2, vl);
}

@@ -0,0 +1,551 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s

// ASM-NOT: warning
#include <riscv_vector.h>

// CHECK-RV32-LABEL: @test_vfmax_vv_f32mf2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
                                   size_t vl) {
  return vfmax(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f32mf2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
  return vfmax(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f32m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
                                 size_t vl) {
  return vfmax(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f32m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
  return vfmax(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f32m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
                                 size_t vl) {
  return vfmax(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f32m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
  return vfmax(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f32m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
                                 size_t vl) {
  return vfmax(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f32m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
  return vfmax(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f32m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
                                 size_t vl) {
  return vfmax(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f32m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
  return vfmax(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f64m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
                                 size_t vl) {
  return vfmax(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f64m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
  return vfmax(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f64m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
                                 size_t vl) {
  return vfmax(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f64m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
  return vfmax(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f64m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
                                 size_t vl) {
  return vfmax(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f64m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
  return vfmax(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f64m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
                                 size_t vl) {
  return vfmax(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f64m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
  return vfmax(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f32mf2_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
                                     vfloat32mf2_t op1, vfloat32mf2_t op2,
                                     size_t vl) {
  return vfmax(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f32mf2_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
                                     vfloat32mf2_t op1, float op2, size_t vl) {
  return vfmax(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f32m1_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                   vfloat32m1_t op1, vfloat32m1_t op2,
                                   size_t vl) {
  return vfmax(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f32m1_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                   vfloat32m1_t op1, float op2, size_t vl) {
  return vfmax(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f32m2_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                   vfloat32m2_t op1, vfloat32m2_t op2,
                                   size_t vl) {
  return vfmax(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f32m2_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
|
||||
vfloat32m2_t op1, float op2, size_t vl) {
|
||||
return vfmax(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmax_vv_f32m4_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
|
||||
vfloat32m4_t op1, vfloat32m4_t op2,
|
||||
size_t vl) {
|
||||
return vfmax(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmax_vf_f32m4_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
|
||||
vfloat32m4_t op1, float op2, size_t vl) {
|
||||
return vfmax(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmax_vv_f32m8_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
|
||||
vfloat32m8_t op1, vfloat32m8_t op2,
|
||||
size_t vl) {
|
||||
return vfmax(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmax_vf_f32m8_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
|
||||
vfloat32m8_t op1, float op2, size_t vl) {
|
||||
return vfmax(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmax_vv_f64m1_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
|
||||
vfloat64m1_t op1, vfloat64m1_t op2,
|
||||
size_t vl) {
|
||||
return vfmax(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmax_vf_f64m1_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
|
||||
vfloat64m1_t op1, double op2, size_t vl) {
|
||||
return vfmax(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmax_vv_f64m2_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
|
||||
vfloat64m2_t op1, vfloat64m2_t op2,
|
||||
size_t vl) {
|
||||
return vfmax(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmax_vf_f64m2_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
|
||||
vfloat64m2_t op1, double op2, size_t vl) {
|
||||
return vfmax(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmax_vv_f64m4_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
|
||||
vfloat64m4_t op1, vfloat64m4_t op2,
|
||||
size_t vl) {
|
||||
return vfmax(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmax_vf_f64m4_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
|
||||
vfloat64m4_t op1, double op2, size_t vl) {
|
||||
return vfmax(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmax_vv_f64m8_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
|
||||
vfloat64m8_t op1, vfloat64m8_t op2,
|
||||
size_t vl) {
|
||||
return vfmax(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmax_vf_f64m8_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
|
||||
vfloat64m8_t op1, double op2, size_t vl) {
|
||||
return vfmax(mask, maskedoff, op1, op2, vl);
|
||||
}
|
|
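The vfmax tests above exercise both operand shapes, vv (vector-vector) and vf (vector-scalar), in unmasked and masked form, always through the single overloaded vfmax entry point. As a rough sketch of how these overloads compose in user code, the loop below clamps a buffer of floats into [lo, hi]; it assumes the strip-mining and memory intrinsics vsetvl_e32m1, vle32_v_f32m1, and vse32_v_f32m1 from the same pre-ratification intrinsic API, none of which are part of this change.

#include <riscv_vector.h>

// Illustrative only: clamp n floats into [lo, hi] with the overloaded
// vfmax/vfmin added in this patch. vsetvl_e32m1, vle32_v_f32m1 and
// vse32_v_f32m1 are assumed helpers from the same intrinsic family.
void clamp_f32(float *dst, const float *src, float lo, float hi, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m1(n - i);              // lanes handled this pass
    vfloat32m1_t v = vle32_v_f32m1(src + i, vl);  // load a strip
    v = vfmax(v, lo, vl);                         // vf form: scalar operand
    v = vfmin(v, hi, vl);
    vse32_v_f32m1(dst + i, v, vl);                // store the strip
    i += vl;
  }
}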
@@ -0,0 +1,551 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s

// ASM-NOT: warning
#include <riscv_vector.h>

// CHECK-RV32-LABEL: @test_vfmin_vv_f32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
                                   size_t vl) {
  return vfmin(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
  return vfmin(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
                                 size_t vl) {
  return vfmin(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
  return vfmin(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
                                 size_t vl) {
  return vfmin(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
  return vfmin(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
                                 size_t vl) {
  return vfmin(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
  return vfmin(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
                                 size_t vl) {
  return vfmin(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
  return vfmin(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
                                 size_t vl) {
  return vfmin(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
  return vfmin(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
                                 size_t vl) {
  return vfmin(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
  return vfmin(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
                                 size_t vl) {
  return vfmin(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
  return vfmin(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
                                 size_t vl) {
  return vfmin(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
  return vfmin(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
                                     vfloat32mf2_t op1, vfloat32mf2_t op2,
                                     size_t vl) {
  return vfmin(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
                                     vfloat32mf2_t op1, float op2, size_t vl) {
  return vfmin(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                   vfloat32m1_t op1, vfloat32m1_t op2,
                                   size_t vl) {
  return vfmin(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                   vfloat32m1_t op1, float op2, size_t vl) {
  return vfmin(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                   vfloat32m2_t op1, vfloat32m2_t op2,
                                   size_t vl) {
  return vfmin(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                   vfloat32m2_t op1, float op2, size_t vl) {
  return vfmin(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                   vfloat32m4_t op1, vfloat32m4_t op2,
                                   size_t vl) {
  return vfmin(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                   vfloat32m4_t op1, float op2, size_t vl) {
  return vfmin(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
                                   vfloat32m8_t op1, vfloat32m8_t op2,
                                   size_t vl) {
  return vfmin(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
                                   vfloat32m8_t op1, float op2, size_t vl) {
  return vfmin(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                   vfloat64m1_t op1, vfloat64m1_t op2,
                                   size_t vl) {
  return vfmin(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                   vfloat64m1_t op1, double op2, size_t vl) {
  return vfmin(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
                                   vfloat64m2_t op1, vfloat64m2_t op2,
                                   size_t vl) {
  return vfmin(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
                                   vfloat64m2_t op1, double op2, size_t vl) {
  return vfmin(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
                                   vfloat64m4_t op1, vfloat64m4_t op2,
                                   size_t vl) {
  return vfmin(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
                                   vfloat64m4_t op1, double op2, size_t vl) {
  return vfmin(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                   vfloat64m8_t op1, vfloat64m8_t op2,
                                   size_t vl) {
  return vfmin(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                   vfloat64m8_t op1, double op2, size_t vl) {
  return vfmin(mask, maskedoff, op1, op2, vl);
}
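Every masked vfmin overload above has the shape (mask, maskedoff, op1, op2, vl): active lanes receive min(op1, op2) and inactive lanes are taken from maskedoff. A minimal merge-style sketch, relying only on signatures exercised in this file:

// Illustrative only: lanes where pred is set receive min(a, b); lanes where
// it is clear pass a through unchanged, because a also serves as the
// maskedoff operand.
vfloat32m1_t min_where(vbool32_t pred, vfloat32m1_t a, vfloat32m1_t b,
                       size_t vl) {
  return vfmin(pred, a, a, b, vl);
}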
@@ -0,0 +1,551 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s

// ASM-NOT: warning
#include <riscv_vector.h>

// CHECK-RV32-LABEL: @test_vfmul_vv_f32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
                                   size_t vl) {
  return vfmul(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vf_f32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
  return vfmul(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vv_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
                                 size_t vl) {
  return vfmul(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vf_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
  return vfmul(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vv_f32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
                                 size_t vl) {
  return vfmul(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vf_f32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
  return vfmul(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vv_f32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
                                 size_t vl) {
  return vfmul(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vf_f32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
  return vfmul(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vv_f32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
                                 size_t vl) {
  return vfmul(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vf_f32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
|
||||
return vfmul(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vv_f64m1(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
|
||||
size_t vl) {
|
||||
return vfmul(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vf_f64m1(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
|
||||
return vfmul(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vv_f64m2(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
|
||||
size_t vl) {
|
||||
return vfmul(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vf_f64m2(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
|
||||
return vfmul(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vv_f64m4(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
|
||||
size_t vl) {
|
||||
return vfmul(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vf_f64m4(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
|
||||
return vfmul(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vv_f64m8(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
|
||||
size_t vl) {
|
||||
return vfmul(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vf_f64m8(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
|
||||
return vfmul(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vv_f32mf2_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
|
||||
vfloat32mf2_t op1, vfloat32mf2_t op2,
|
||||
size_t vl) {
|
||||
return vfmul(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vf_f32mf2_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
|
||||
vfloat32mf2_t op1, float op2, size_t vl) {
|
||||
return vfmul(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vv_f32m1_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
|
||||
vfloat32m1_t op1, vfloat32m1_t op2,
|
||||
size_t vl) {
|
||||
return vfmul(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vf_f32m1_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
|
||||
vfloat32m1_t op1, float op2, size_t vl) {
|
||||
return vfmul(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vv_f32m2_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
|
||||
vfloat32m2_t op1, vfloat32m2_t op2,
|
||||
size_t vl) {
|
||||
return vfmul(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vf_f32m2_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
|
||||
vfloat32m2_t op1, float op2, size_t vl) {
|
||||
return vfmul(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vv_f32m4_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
|
||||
vfloat32m4_t op1, vfloat32m4_t op2,
|
||||
size_t vl) {
|
||||
return vfmul(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vf_f32m4_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
|
||||
vfloat32m4_t op1, float op2, size_t vl) {
|
||||
return vfmul(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vv_f32m8_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
|
||||
vfloat32m8_t op1, vfloat32m8_t op2,
|
||||
size_t vl) {
|
||||
return vfmul(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vf_f32m8_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
|
||||
vfloat32m8_t op1, float op2, size_t vl) {
|
||||
return vfmul(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vv_f64m1_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
|
||||
vfloat64m1_t op1, vfloat64m1_t op2,
|
||||
size_t vl) {
|
||||
return vfmul(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vf_f64m1_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
|
||||
vfloat64m1_t op1, double op2, size_t vl) {
|
||||
return vfmul(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vv_f64m2_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
|
||||
vfloat64m2_t op1, vfloat64m2_t op2,
|
||||
size_t vl) {
|
||||
return vfmul(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vf_f64m2_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
|
||||
vfloat64m2_t op1, double op2, size_t vl) {
|
||||
return vfmul(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vv_f64m4_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
|
||||
vfloat64m4_t op1, vfloat64m4_t op2,
|
||||
size_t vl) {
|
||||
return vfmul(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vf_f64m4_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
|
||||
vfloat64m4_t op1, double op2, size_t vl) {
|
||||
return vfmul(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vv_f64m8_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
|
||||
vfloat64m8_t op1, vfloat64m8_t op2,
|
||||
size_t vl) {
|
||||
return vfmul(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vf_f64m8_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
|
||||
vfloat64m8_t op1, double op2, size_t vl) {
|
||||
return vfmul(mask, maskedoff, op1, op2, vl);
|
||||
}
@ -0,0 +1,272 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s

// ASM-NOT: warning
#include <riscv_vector.h>

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32mf2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
  return vfrdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
  return vfrdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
  return vfrdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
  return vfrdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
  return vfrdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
  return vfrdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
  return vfrdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
  return vfrdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
  return vfrdiv(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32mf2_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
                                      vfloat32mf2_t op1, float op2, size_t vl) {
  return vfrdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m1_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                    vfloat32m1_t op1, float op2, size_t vl) {
  return vfrdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m2_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                    vfloat32m2_t op1, float op2, size_t vl) {
  return vfrdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m4_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                    vfloat32m4_t op1, float op2, size_t vl) {
  return vfrdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m8_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
                                    vfloat32m8_t op1, float op2, size_t vl) {
  return vfrdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m1_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                    vfloat64m1_t op1, double op2, size_t vl) {
  return vfrdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m2_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
                                    vfloat64m2_t op1, double op2, size_t vl) {
  return vfrdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m4_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
                                    vfloat64m4_t op1, double op2, size_t vl) {
  return vfrdiv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m8_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                    vfloat64m8_t op1, double op2, size_t vl) {
  return vfrdiv(mask, maskedoff, op1, op2, vl);
}
@ -0,0 +1,272 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
|
||||
// REQUIRES: riscv-registered-target
|
||||
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
|
||||
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
|
||||
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
|
||||
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
|
||||
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
|
||||
// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
|
||||
|
||||
// ASM-NOT: warning
|
||||
#include <riscv_vector.h>
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfrsub_vf_f32mf2(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
|
||||
return vfrsub(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m1(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
|
||||
return vfrsub(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m2(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
|
||||
return vfrsub(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m4(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
|
||||
return vfrsub(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m8(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
|
||||
return vfrsub(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m1(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
|
||||
return vfrsub(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m2(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
|
||||
return vfrsub(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m4(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
|
||||
return vfrsub(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m8(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
|
||||
return vfrsub(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfrsub_vf_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
                                      vfloat32mf2_t op1, float op2, size_t vl) {
  return vfrsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                    vfloat32m1_t op1, float op2, size_t vl) {
  return vfrsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                    vfloat32m2_t op1, float op2, size_t vl) {
  return vfrsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                    vfloat32m4_t op1, float op2, size_t vl) {
  return vfrsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
                                    vfloat32m8_t op1, float op2, size_t vl) {
  return vfrsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                    vfloat64m1_t op1, double op2, size_t vl) {
  return vfrsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
                                    vfloat64m2_t op1, double op2, size_t vl) {
  return vfrsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
                                    vfloat64m4_t op1, double op2, size_t vl) {
  return vfrsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                    vfloat64m8_t op1, double op2, size_t vl) {
  return vfrsub(mask, maskedoff, op1, op2, vl);
}
File diff suppressed because it is too large
@ -0,0 +1,551 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s

// ASM-NOT: warning
#include <riscv_vector.h>

// CHECK-RV32-LABEL: @test_vfsub_vv_f32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
                                   size_t vl) {
  return vfsub(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
  return vfsub(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
                                 size_t vl) {
  return vfsub(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
  return vfsub(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
                                 size_t vl) {
  return vfsub(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
  return vfsub(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
                                 size_t vl) {
  return vfsub(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
  return vfsub(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
                                 size_t vl) {
  return vfsub(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
  return vfsub(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
                                 size_t vl) {
  return vfsub(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
  return vfsub(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
                                 size_t vl) {
  return vfsub(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
  return vfsub(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
                                 size_t vl) {
  return vfsub(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
  return vfsub(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
                                 size_t vl) {
  return vfsub(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
  return vfsub(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
                                     vfloat32mf2_t op1, vfloat32mf2_t op2,
                                     size_t vl) {
  return vfsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
                                     vfloat32mf2_t op1, float op2, size_t vl) {
  return vfsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                   vfloat32m1_t op1, vfloat32m1_t op2,
                                   size_t vl) {
  return vfsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                   vfloat32m1_t op1, float op2, size_t vl) {
  return vfsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                   vfloat32m2_t op1, vfloat32m2_t op2,
                                   size_t vl) {
  return vfsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                   vfloat32m2_t op1, float op2, size_t vl) {
  return vfsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                   vfloat32m4_t op1, vfloat32m4_t op2,
                                   size_t vl) {
  return vfsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                   vfloat32m4_t op1, float op2, size_t vl) {
  return vfsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
                                   vfloat32m8_t op1, vfloat32m8_t op2,
                                   size_t vl) {
  return vfsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
                                   vfloat32m8_t op1, float op2, size_t vl) {
  return vfsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                   vfloat64m1_t op1, vfloat64m1_t op2,
                                   size_t vl) {
  return vfsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                   vfloat64m1_t op1, double op2, size_t vl) {
  return vfsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
                                   vfloat64m2_t op1, vfloat64m2_t op2,
                                   size_t vl) {
  return vfsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
                                   vfloat64m2_t op1, double op2, size_t vl) {
  return vfsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
                                   vfloat64m4_t op1, vfloat64m4_t op2,
                                   size_t vl) {
  return vfsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
                                   vfloat64m4_t op1, double op2, size_t vl) {
  return vfsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                   vfloat64m8_t op1, vfloat64m8_t op2,
                                   size_t vl) {
  return vfsub(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                   vfloat64m8_t op1, double op2, size_t vl) {
  return vfsub(mask, maskedoff, op1, op2, vl);
}

@ -0,0 +1,551 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s

// ASM-NOT: warning
#include <riscv_vector.h>

// CHECK-RV32-LABEL: @test_vfdiv_vv_f32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
                                   size_t vl) {
  return vfdiv_vv_f32mf2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
  return vfdiv_vf_f32mf2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
                                 size_t vl) {
  return vfdiv_vv_f32m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
  return vfdiv_vf_f32m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
                                 size_t vl) {
  return vfdiv_vv_f32m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
  return vfdiv_vf_f32m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
                                 size_t vl) {
  return vfdiv_vv_f32m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
  return vfdiv_vf_f32m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
                                 size_t vl) {
  return vfdiv_vv_f32m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
  return vfdiv_vf_f32m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
                                 size_t vl) {
  return vfdiv_vv_f64m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
  return vfdiv_vf_f64m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
                                 size_t vl) {
  return vfdiv_vv_f64m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
  return vfdiv_vf_f64m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
                                 size_t vl) {
  return vfdiv_vv_f64m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
  return vfdiv_vf_f64m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m8(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
|
||||
size_t vl) {
|
||||
return vfdiv_vv_f64m8(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m8(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
|
||||
return vfdiv_vf_f64m8(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfdiv_vv_f32mf2_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
|
||||
vfloat32mf2_t op1, vfloat32mf2_t op2,
|
||||
size_t vl) {
|
||||
return vfdiv_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfdiv_vf_f32mf2_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
|
||||
vfloat32mf2_t op1, float op2, size_t vl) {
|
||||
return vfdiv_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m1_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
|
||||
vfloat32m1_t op1, vfloat32m1_t op2,
|
||||
size_t vl) {
|
||||
return vfdiv_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m1_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
|
||||
vfloat32m1_t op1, float op2, size_t vl) {
|
||||
return vfdiv_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m2_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
|
||||
vfloat32m2_t op1, vfloat32m2_t op2,
|
||||
size_t vl) {
|
||||
return vfdiv_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m2_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
|
||||
vfloat32m2_t op1, float op2, size_t vl) {
|
||||
return vfdiv_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m4_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
|
||||
vfloat32m4_t op1, vfloat32m4_t op2,
|
||||
size_t vl) {
|
||||
return vfdiv_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m4_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
|
||||
vfloat32m4_t op1, float op2, size_t vl) {
|
||||
return vfdiv_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m8_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
|
||||
vfloat32m8_t op1, vfloat32m8_t op2,
|
||||
size_t vl) {
|
||||
return vfdiv_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m8_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
|
||||
vfloat32m8_t op1, float op2, size_t vl) {
|
||||
return vfdiv_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m1_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
|
||||
vfloat64m1_t op1, vfloat64m1_t op2,
|
||||
size_t vl) {
|
||||
return vfdiv_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m1_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
|
||||
vfloat64m1_t op1, double op2, size_t vl) {
|
||||
return vfdiv_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m2_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
|
||||
vfloat64m2_t op1, vfloat64m2_t op2,
|
||||
size_t vl) {
|
||||
return vfdiv_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m2_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
|
||||
vfloat64m2_t op1, double op2, size_t vl) {
|
||||
return vfdiv_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m4_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
|
||||
vfloat64m4_t op1, vfloat64m4_t op2,
|
||||
size_t vl) {
|
||||
return vfdiv_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m4_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
|
||||
vfloat64m4_t op1, double op2, size_t vl) {
|
||||
return vfdiv_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m8_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
|
||||
vfloat64m8_t op1, vfloat64m8_t op2,
|
||||
size_t vl) {
|
||||
return vfdiv_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m8_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
|
||||
vfloat64m8_t op1, double op2, size_t vl) {
|
||||
return vfdiv_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
|
@@ -0,0 +1,551 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s

// ASM-NOT: warning
#include <riscv_vector.h>

// CHECK-RV32-LABEL: @test_vfmax_vv_f32mf2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
                                   size_t vl) {
  return vfmax_vv_f32mf2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f32mf2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
  return vfmax_vf_f32mf2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f32m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
                                 size_t vl) {
  return vfmax_vv_f32m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f32m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
  return vfmax_vf_f32m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f32m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
                                 size_t vl) {
  return vfmax_vv_f32m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f32m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
  return vfmax_vf_f32m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f32m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
                                 size_t vl) {
  return vfmax_vv_f32m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f32m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
  return vfmax_vf_f32m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f32m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
                                 size_t vl) {
  return vfmax_vv_f32m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f32m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
  return vfmax_vf_f32m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f64m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
                                 size_t vl) {
  return vfmax_vv_f64m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f64m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
  return vfmax_vf_f64m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f64m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
                                 size_t vl) {
  return vfmax_vv_f64m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f64m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
  return vfmax_vf_f64m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f64m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
                                 size_t vl) {
  return vfmax_vv_f64m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f64m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
  return vfmax_vf_f64m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f64m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
                                 size_t vl) {
  return vfmax_vv_f64m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f64m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
  return vfmax_vf_f64m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f32mf2_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
                                     vfloat32mf2_t op1, vfloat32mf2_t op2,
                                     size_t vl) {
  return vfmax_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f32mf2_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
                                     vfloat32mf2_t op1, float op2, size_t vl) {
  return vfmax_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f32m1_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                   vfloat32m1_t op1, vfloat32m1_t op2,
                                   size_t vl) {
  return vfmax_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f32m1_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                   vfloat32m1_t op1, float op2, size_t vl) {
  return vfmax_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f32m2_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                   vfloat32m2_t op1, vfloat32m2_t op2,
                                   size_t vl) {
  return vfmax_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f32m2_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                   vfloat32m2_t op1, float op2, size_t vl) {
  return vfmax_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f32m4_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                   vfloat32m4_t op1, vfloat32m4_t op2,
                                   size_t vl) {
  return vfmax_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f32m4_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                   vfloat32m4_t op1, float op2, size_t vl) {
  return vfmax_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f32m8_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
                                   vfloat32m8_t op1, vfloat32m8_t op2,
                                   size_t vl) {
  return vfmax_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f32m8_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
                                   vfloat32m8_t op1, float op2, size_t vl) {
  return vfmax_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f64m1_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                   vfloat64m1_t op1, vfloat64m1_t op2,
                                   size_t vl) {
  return vfmax_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f64m1_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                   vfloat64m1_t op1, double op2, size_t vl) {
  return vfmax_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f64m2_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
                                   vfloat64m2_t op1, vfloat64m2_t op2,
                                   size_t vl) {
  return vfmax_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f64m2_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
                                   vfloat64m2_t op1, double op2, size_t vl) {
  return vfmax_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f64m4_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
                                   vfloat64m4_t op1, vfloat64m4_t op2,
                                   size_t vl) {
  return vfmax_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f64m4_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
                                   vfloat64m4_t op1, double op2, size_t vl) {
  return vfmax_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vv_f64m8_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                   vfloat64m8_t op1, vfloat64m8_t op2,
                                   size_t vl) {
  return vfmax_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmax_vf_f64m8_m(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                   vfloat64m8_t op1, double op2, size_t vl) {
  return vfmax_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
}
@@ -0,0 +1,551 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s

// ASM-NOT: warning
#include <riscv_vector.h>

// CHECK-RV32-LABEL: @test_vfmin_vv_f32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
                                   size_t vl) {
  return vfmin_vv_f32mf2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
  return vfmin_vf_f32mf2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
                                 size_t vl) {
  return vfmin_vv_f32m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
  return vfmin_vf_f32m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
                                 size_t vl) {
  return vfmin_vv_f32m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
  return vfmin_vf_f32m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
                                 size_t vl) {
  return vfmin_vv_f32m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
  return vfmin_vf_f32m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
                                 size_t vl) {
  return vfmin_vv_f32m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
  return vfmin_vf_f32m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
                                 size_t vl) {
  return vfmin_vv_f64m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
  return vfmin_vf_f64m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
                                 size_t vl) {
  return vfmin_vv_f64m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
  return vfmin_vf_f64m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
                                 size_t vl) {
  return vfmin_vv_f64m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
  return vfmin_vf_f64m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
                                 size_t vl) {
  return vfmin_vv_f64m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
  return vfmin_vf_f64m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
                                     vfloat32mf2_t op1, vfloat32mf2_t op2,
                                     size_t vl) {
  return vfmin_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
                                     vfloat32mf2_t op1, float op2, size_t vl) {
  return vfmin_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                   vfloat32m1_t op1, vfloat32m1_t op2,
                                   size_t vl) {
  return vfmin_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                   vfloat32m1_t op1, float op2, size_t vl) {
  return vfmin_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                   vfloat32m2_t op1, vfloat32m2_t op2,
                                   size_t vl) {
  return vfmin_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                   vfloat32m2_t op1, float op2, size_t vl) {
  return vfmin_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                   vfloat32m4_t op1, vfloat32m4_t op2,
                                   size_t vl) {
  return vfmin_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                   vfloat32m4_t op1, float op2, size_t vl) {
  return vfmin_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
                                   vfloat32m8_t op1, vfloat32m8_t op2,
                                   size_t vl) {
  return vfmin_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
                                   vfloat32m8_t op1, float op2, size_t vl) {
  return vfmin_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                   vfloat64m1_t op1, vfloat64m1_t op2,
                                   size_t vl) {
  return vfmin_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                   vfloat64m1_t op1, double op2, size_t vl) {
  return vfmin_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
                                   vfloat64m2_t op1, vfloat64m2_t op2,
                                   size_t vl) {
  return vfmin_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
                                   vfloat64m2_t op1, double op2, size_t vl) {
  return vfmin_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
                                   vfloat64m4_t op1, vfloat64m4_t op2,
                                   size_t vl) {
  return vfmin_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
                                   vfloat64m4_t op1, double op2, size_t vl) {
  return vfmin_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vv_f64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                   vfloat64m8_t op1, vfloat64m8_t op2,
                                   size_t vl) {
  return vfmin_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmin_vf_f64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                   vfloat64m8_t op1, double op2, size_t vl) {
  return vfmin_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
}
@@ -0,0 +1,551 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s

// ASM-NOT: warning
#include <riscv_vector.h>

// CHECK-RV32-LABEL: @test_vfmul_vv_f32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
                                   size_t vl) {
  return vfmul_vv_f32mf2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vf_f32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
  return vfmul_vf_f32mf2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vv_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
                                 size_t vl) {
  return vfmul_vv_f32m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vf_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
  return vfmul_vf_f32m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vv_f32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
                                 size_t vl) {
  return vfmul_vv_f32m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vf_f32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
  return vfmul_vf_f32m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vv_f32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
                                 size_t vl) {
  return vfmul_vv_f32m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vf_f32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
  return vfmul_vf_f32m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vv_f32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
                                 size_t vl) {
  return vfmul_vv_f32m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vf_f32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
  return vfmul_vf_f32m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vv_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
                                 size_t vl) {
  return vfmul_vv_f64m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vf_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
  return vfmul_vf_f64m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vv_f64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
                                 size_t vl) {
  return vfmul_vv_f64m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vf_f64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
  return vfmul_vf_f64m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vv_f64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
                                 size_t vl) {
  return vfmul_vv_f64m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vf_f64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
  return vfmul_vf_f64m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vv_f64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
                                 size_t vl) {
  return vfmul_vv_f64m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vf_f64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
  return vfmul_vf_f64m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vv_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
                                     vfloat32mf2_t op1, vfloat32mf2_t op2,
                                     size_t vl) {
  return vfmul_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vf_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
                                     vfloat32mf2_t op1, float op2, size_t vl) {
  return vfmul_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vv_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                   vfloat32m1_t op1, vfloat32m1_t op2,
                                   size_t vl) {
  return vfmul_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vf_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                   vfloat32m1_t op1, float op2, size_t vl) {
  return vfmul_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vv_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                   vfloat32m2_t op1, vfloat32m2_t op2,
                                   size_t vl) {
  return vfmul_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vf_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                   vfloat32m2_t op1, float op2, size_t vl) {
  return vfmul_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vv_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                   vfloat32m4_t op1, vfloat32m4_t op2,
                                   size_t vl) {
  return vfmul_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vf_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                   vfloat32m4_t op1, float op2, size_t vl) {
  return vfmul_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vv_f32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
                                   vfloat32m8_t op1, vfloat32m8_t op2,
                                   size_t vl) {
  return vfmul_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vf_f32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
                                   vfloat32m8_t op1, float op2, size_t vl) {
  return vfmul_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vv_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                   vfloat64m1_t op1, vfloat64m1_t op2,
                                   size_t vl) {
  return vfmul_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfmul_vf_f64m1_m(
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
|
||||
vfloat64m1_t op1, double op2, size_t vl) {
|
||||
return vfmul_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vv_f64m2_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
|
||||
vfloat64m2_t op1, vfloat64m2_t op2,
|
||||
size_t vl) {
|
||||
return vfmul_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vf_f64m2_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
|
||||
vfloat64m2_t op1, double op2, size_t vl) {
|
||||
return vfmul_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vv_f64m4_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
|
||||
vfloat64m4_t op1, vfloat64m4_t op2,
|
||||
size_t vl) {
|
||||
return vfmul_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vf_f64m4_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
|
||||
vfloat64m4_t op1, double op2, size_t vl) {
|
||||
return vfmul_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vv_f64m8_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
|
||||
vfloat64m8_t op1, vfloat64m8_t op2,
|
||||
size_t vl) {
|
||||
return vfmul_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfmul_vf_f64m8_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
|
||||
vfloat64m8_t op1, double op2, size_t vl) {
|
||||
return vfmul_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
@@ -0,0 +1,272 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s

// ASM-NOT: warning
#include <riscv_vector.h>

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
  return vfrdiv_vf_f32mf2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
  return vfrdiv_vf_f32m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
  return vfrdiv_vf_f32m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
  return vfrdiv_vf_f32m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
  return vfrdiv_vf_f32m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
  return vfrdiv_vf_f64m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
  return vfrdiv_vf_f64m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
  return vfrdiv_vf_f64m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
  return vfrdiv_vf_f64m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
                                      vfloat32mf2_t op1, float op2, size_t vl) {
  return vfrdiv_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                    vfloat32m1_t op1, float op2, size_t vl) {
  return vfrdiv_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                    vfloat32m2_t op1, float op2, size_t vl) {
  return vfrdiv_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                    vfloat32m4_t op1, float op2, size_t vl) {
  return vfrdiv_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
                                    vfloat32m8_t op1, float op2, size_t vl) {
  return vfrdiv_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                    vfloat64m1_t op1, double op2, size_t vl) {
  return vfrdiv_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
                                    vfloat64m2_t op1, double op2, size_t vl) {
  return vfrdiv_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
                                    vfloat64m4_t op1, double op2, size_t vl) {
  return vfrdiv_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                    vfloat64m8_t op1, double op2, size_t vl) {
  return vfrdiv_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
}
@@ -0,0 +1,272 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s

// ASM-NOT: warning
#include <riscv_vector.h>

// CHECK-RV32-LABEL: @test_vfrsub_vf_f32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
  return vfrsub_vf_f32mf2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
  return vfrsub_vf_f32m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
  return vfrsub_vf_f32m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
  return vfrsub_vf_f32m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
  return vfrsub_vf_f32m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
  return vfrsub_vf_f64m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
  return vfrsub_vf_f64m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
  return vfrsub_vf_f64m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
  return vfrsub_vf_f64m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
                                      vfloat32mf2_t op1, float op2, size_t vl) {
  return vfrsub_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                    vfloat32m1_t op1, float op2, size_t vl) {
  return vfrsub_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                    vfloat32m2_t op1, float op2, size_t vl) {
  return vfrsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                    vfloat32m4_t op1, float op2, size_t vl) {
  return vfrsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
                                    vfloat32m8_t op1, float op2, size_t vl) {
  return vfrsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                    vfloat64m1_t op1, double op2, size_t vl) {
  return vfrsub_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
                                    vfloat64m2_t op1, double op2, size_t vl) {
  return vfrsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
                                    vfloat64m4_t op1, double op2, size_t vl) {
  return vfrsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                    vfloat64m8_t op1, double op2, size_t vl) {
  return vfrsub_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
}
File diff suppressed because it is too large
@@ -0,0 +1,551 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s

// ASM-NOT: warning
#include <riscv_vector.h>

// CHECK-RV32-LABEL: @test_vfsub_vv_f32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
                                   size_t vl) {
  return vfsub_vv_f32mf2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
  return vfsub_vf_f32mf2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
                                 size_t vl) {
  return vfsub_vv_f32m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
  return vfsub_vf_f32m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
                                 size_t vl) {
  return vfsub_vv_f32m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
  return vfsub_vf_f32m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
                                 size_t vl) {
  return vfsub_vv_f32m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
  return vfsub_vf_f32m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
                                 size_t vl) {
  return vfsub_vv_f32m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
  return vfsub_vf_f32m8(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
                                 size_t vl) {
  return vfsub_vv_f64m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
  return vfsub_vf_f64m1(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
                                 size_t vl) {
  return vfsub_vv_f64m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
  return vfsub_vf_f64m2(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
                                 size_t vl) {
  return vfsub_vv_f64m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vf_f64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
  return vfsub_vf_f64m4(op1, op2, vl);
}

// CHECK-RV32-LABEL: @test_vfsub_vv_f64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
|
||||
size_t vl) {
|
||||
return vfsub_vv_f64m8(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfsub_vf_f64m8(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
|
||||
return vfsub_vf_f64m8(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfsub_vv_f32mf2_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
|
||||
vfloat32mf2_t op1, vfloat32mf2_t op2,
|
||||
size_t vl) {
|
||||
return vfsub_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfsub_vf_f32mf2_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
|
||||
vfloat32mf2_t op1, float op2, size_t vl) {
|
||||
return vfsub_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfsub_vv_f32m1_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
|
||||
vfloat32m1_t op1, vfloat32m1_t op2,
|
||||
size_t vl) {
|
||||
return vfsub_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfsub_vf_f32m1_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
|
||||
vfloat32m1_t op1, float op2, size_t vl) {
|
||||
return vfsub_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfsub_vv_f32m2_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
|
||||
vfloat32m2_t op1, vfloat32m2_t op2,
|
||||
size_t vl) {
|
||||
return vfsub_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfsub_vf_f32m2_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
|
||||
vfloat32m2_t op1, float op2, size_t vl) {
|
||||
return vfsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfsub_vv_f32m4_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
|
||||
vfloat32m4_t op1, vfloat32m4_t op2,
|
||||
size_t vl) {
|
||||
return vfsub_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfsub_vf_f32m4_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
|
||||
vfloat32m4_t op1, float op2, size_t vl) {
|
||||
return vfsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfsub_vv_f32m8_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
|
||||
vfloat32m8_t op1, vfloat32m8_t op2,
|
||||
size_t vl) {
|
||||
return vfsub_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfsub_vf_f32m8_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
|
||||
vfloat32m8_t op1, float op2, size_t vl) {
|
||||
return vfsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfsub_vv_f64m1_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
|
||||
vfloat64m1_t op1, vfloat64m1_t op2,
|
||||
size_t vl) {
|
||||
return vfsub_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfsub_vf_f64m1_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
|
||||
vfloat64m1_t op1, double op2, size_t vl) {
|
||||
return vfsub_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfsub_vv_f64m2_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
|
||||
vfloat64m2_t op1, vfloat64m2_t op2,
|
||||
size_t vl) {
|
||||
return vfsub_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfsub_vf_f64m2_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
|
||||
vfloat64m2_t op1, double op2, size_t vl) {
|
||||
return vfsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfsub_vv_f64m4_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
|
||||
vfloat64m4_t op1, vfloat64m4_t op2,
|
||||
size_t vl) {
|
||||
return vfsub_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfsub_vf_f64m4_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
|
||||
vfloat64m4_t op1, double op2, size_t vl) {
|
||||
return vfsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfsub_vv_f64m8_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
|
||||
vfloat64m8_t op1, vfloat64m8_t op2,
|
||||
size_t vl) {
|
||||
return vfsub_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV32-LABEL: @test_vfsub_vf_f64m8_m(
|
||||
// CHECK-RV32-NEXT: entry:
|
||||
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
|
||||
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
// CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
|
||||
vfloat64m8_t op1, double op2, size_t vl) {
|
||||
return vfsub_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
|
||||
}
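
// Usage illustration (not part of this test file): a minimal sketch of how
// the vfsub_vf_* intrinsics exercised above might be used in application
// code, assuming the pre-1.0 intrinsic naming from <riscv_vector.h> that
// this patch follows; sub_scalar_f32 is a hypothetical helper.
#include <riscv_vector.h>
#include <stddef.h>

// Subtract the scalar s from each element of src, strip-mining n elements
// vl at a time.
void sub_scalar_f32(float *dst, const float *src, float s, size_t n) {
  while (n > 0) {
    size_t vl = vsetvl_e32m1(n);             // elements handled this pass
    vfloat32m1_t v = vle32_v_f32m1(src, vl); // unit-stride load
    v = vfsub_vf_f32m1(v, s, vl);            // vector-scalar subtract
    vse32_v_f32m1(dst, v, vl);               // unit-stride store
    src += vl;
    dst += vl;
    n -= vl;
  }
}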