[RISCV][Clang] Add some RVV Permutation intrinsic functions.

Support the following instructions (semantics sketched below):
1. Vector Slide Instructions
2. Vector Register Gather Instructions
3. Vector Compress Instruction
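
At the element level the three groups behave as follows; this is a minimal scalar-loop sketch of the unmasked v0.10 semantics (hedged: vstart, tail policy, and LMUL details omitted; function names are hypothetical):

#include <stddef.h>

// Hedged scalar model of the unmasked operations on length-vl arrays.
void slideup(int *res, const int *src, size_t offset, size_t vl) {
  for (size_t i = offset; i < vl; ++i)  // res[0..offset-1] keep old values
    res[i] = src[i - offset];
}

void rgather(int *res, const int *src, const unsigned *idx, size_t vl,
             size_t vlmax) {
  for (size_t i = 0; i < vl; ++i)       // out-of-range index yields zero
    res[i] = idx[i] < vlmax ? src[idx[i]] : 0;
}

void compress(int *res, const int *src, const _Bool *mask, size_t vl) {
  size_t j = 0;                         // pack active elements to the front
  for (size_t i = 0; i < vl; ++i)
    if (mask[i]) res[j++] = src[i];
}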

Authored-by: Roger Ferrer Ibanez <rofirrim@gmail.com>
Co-Authored-by: Zakk Chen <zakk.chen@sifive.com>

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D100127
Zakk Chen 2021-04-08 10:15:09 -07:00
parent a8fc0e445c
commit 59d5b8c27b
15 changed files with 22700 additions and 0 deletions


@@ -273,6 +273,11 @@ multiclass RVVIntBinBuiltinSet
    : RVVSignedBinBuiltinSet,
      RVVUnsignedBinBuiltinSet;
multiclass RVVSlideOneBuiltinSet
    : RVVOutOp1BuiltinSet<NAME, "csil",
                          [["vx", "v", "vve"],
                           ["vx", "Uv", "UvUve"]]>;
multiclass RVVSignedShiftBuiltinSet
    : RVVOutOp1BuiltinSet<NAME, "csil",
                          [["vv", "v", "vvUv"],
@@ -396,6 +401,14 @@ class RVVMaskOp0Builtin<string prototype> : RVVOp0Builtin<"m", prototype, "c"> {
  let HasMaskedOffOperand = false;
}
let HasMaskedOffOperand = false in {
  multiclass RVVSlideBuiltinSet {
    defm "" : RVVOutBuiltinSet<NAME, "csilfd",
                               [["vx","v", "vvvz"]]>;
    defm "" : RVVOutBuiltinSet<NAME, "csil",
                               [["vx","Uv", "UvUvUvz"]]>;
  }
}
class RVVFloatingUnaryBuiltin<string builtin_suffix, string ir_suffix,
                              string prototype>
@@ -1178,3 +1191,50 @@ let HasNoMaskedOverloaded = false in {
  defm vid : RVVOutBuiltinSet<"vid", "csil", [["v", "v", "v"],
                                              ["v", "Uv", "Uv"]]>;
}
// 17. Vector Permutation Instructions
// 17.1. Integer Scalar Move Instructions
// TODO
// 17.2. Floating-Point Scalar Move Instructions
// TODO
// 17.3. Vector Slide Instructions
// 17.3.1. Vector Slideup Instructions
defm vslideup : RVVSlideBuiltinSet;
// 17.3.2. Vector Slidedown Instructions
defm vslidedown : RVVSlideBuiltinSet;
// 17.3.3. Vector Slide1up Instructions
defm vslide1up : RVVSlideOneBuiltinSet;
defm vfslide1up : RVVFloatingBinVFBuiltinSet;
// 17.3.4. Vector Slide1down Instruction
defm vslide1down : RVVSlideOneBuiltinSet;
defm vfslide1down : RVVFloatingBinVFBuiltinSet;
// 17.4. Vector Register Gather Instructions
// signed and floating type
defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "csilfd",
                                 [["vv", "v", "vvUv"]]>;
defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "csilfd",
                                 [["vx", "v", "vvz"]]>;
defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csilfd",
                                     [["vv", "v", "vv(Log2EEW:4)Uv"]]>;
// unsigned type
defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "csil",
                                 [["vv", "Uv", "UvUvUv"]]>;
defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "csil",
                                 [["vx", "Uv", "UvUvz"]]>;
defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csil",
                                     [["vv", "Uv", "UvUv(Log2EEW:4)Uv"]]>;
// 17.5. Vector Compress Instruction
let HasMask = false, PermuteOperands = [2, 0, 1] in {
  // signed and floating type
  defm vcompress : RVVOutBuiltinSet<"vcompress", "csilfd",
                                    [["vm", "v", "vvvm"]]>;
  // unsigned type
  defm vcompress : RVVOutBuiltinSet<"vcompress", "csil",
                                    [["vm", "Uv", "UvUvUvm"]]>;
}
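
From C these builtins surface through <riscv_vector.h>. A hedged usage sketch of the unmasked overloaded forms, with argument orders read off the prototype strings above ("vvvz" = dest, src, offset, vl; PermuteOperands = [2, 0, 1] moves the vcompress mask to the front); this sketches the v0.10-era API, not the current spec:

#include <riscv_vector.h>

// Sketch only: later intrinsic specs renamed these with a __riscv_ prefix.
vint32m1_t permute_demo(vint32m1_t dst, vint32m1_t src, vuint32m1_t idx,
                        vbool32_t mask, size_t offset, size_t vl) {
  dst = vslideup(dst, src, offset, vl);    // 17.3.1: dst[i+offset] = src[i]
  dst = vslidedown(dst, src, offset, vl);  // 17.3.2: dst[i] = src[i+offset]
  dst = vrgather(src, idx, vl);            // 17.4:   dst[i] = src[idx[i]]
  dst = vcompress(mask, dst, src, vl);     // 17.5:   pack masked-on elements
  return dst;
}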


@@ -0,0 +1,296 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
// ASM-NOT: warning
#include <riscv_vector.h>
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i32(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i64(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value,
size_t vl) {
return vfslide1down(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32.i32(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32.i64(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value,
size_t vl) {
return vfslide1down(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32.i32(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32.i64(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value,
size_t vl) {
return vfslide1down(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32.i32(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32.i64(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value,
size_t vl) {
return vfslide1down(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32.i32(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32.i64(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value,
size_t vl) {
return vfslide1down(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64.i32(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64.i64(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value,
size_t vl) {
return vfslide1down(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64.i32(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64.i64(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value,
size_t vl) {
return vfslide1down(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64.i32(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64.i64(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value,
size_t vl) {
return vfslide1down(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64.i32(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64.i64(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value,
size_t vl) {
return vfslide1down(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask,
vfloat32mf2_t maskedoff,
vfloat32mf2_t src, float value,
size_t vl) {
return vfslide1down(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask,
vfloat32m1_t maskedoff,
vfloat32m1_t src, float value,
size_t vl) {
return vfslide1down(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask,
vfloat32m2_t maskedoff,
vfloat32m2_t src, float value,
size_t vl) {
return vfslide1down(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
vfloat32m4_t src, float value,
size_t vl) {
return vfslide1down(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
vfloat32m8_t src, float value,
size_t vl) {
return vfslide1down(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask,
vfloat64m1_t maskedoff,
vfloat64m1_t src, double value,
size_t vl) {
return vfslide1down(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask,
vfloat64m2_t maskedoff,
vfloat64m2_t src, double value,
size_t vl) {
return vfslide1down(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask,
vfloat64m4_t maskedoff,
vfloat64m4_t src, double value,
size_t vl) {
return vfslide1down(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
vfloat64m8_t src, double value,
size_t vl) {
return vfslide1down(mask, maskedoff, src, value, vl);
}


@@ -0,0 +1,291 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
// ASM-NOT: warning
#include <riscv_vector.h>
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i32(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i64(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value,
size_t vl) {
return vfslide1up(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32.i32(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32.i64(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value,
size_t vl) {
return vfslide1up(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32.i32(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32.i64(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value,
size_t vl) {
return vfslide1up(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32.i32(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32.i64(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value,
size_t vl) {
return vfslide1up(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32.i32(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32.i64(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value,
size_t vl) {
return vfslide1up(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64.i32(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64.i64(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value,
size_t vl) {
return vfslide1up(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64.i32(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64.i64(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value,
size_t vl) {
return vfslide1up(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64.i32(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64.i64(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value,
size_t vl) {
return vfslide1up(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64.i32(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64.i64(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value,
size_t vl) {
return vfslide1up(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask,
vfloat32mf2_t maskedoff,
vfloat32mf2_t src, float value,
size_t vl) {
return vfslide1up(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
vfloat32m1_t src, float value,
size_t vl) {
return vfslide1up(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
vfloat32m2_t src, float value,
size_t vl) {
return vfslide1up(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
vfloat32m4_t src, float value,
size_t vl) {
return vfslide1up(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
vfloat32m8_t src, float value,
size_t vl) {
return vfslide1up(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
vfloat64m1_t src, double value,
size_t vl) {
return vfslide1up(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
vfloat64m2_t src, double value,
size_t vl) {
return vfslide1up(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
vfloat64m4_t src, double value,
size_t vl) {
return vfslide1up(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
vfloat64m8_t src, double value,
size_t vl) {
return vfslide1up(mask, maskedoff, src, value, vl);
}
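
The two files above exercise the type-overloaded spellings; the tests that follow repeat the same coverage through the explicitly suffixed names. Both spellings lower to the same IR intrinsic, for example:

// Equivalent calls: overloaded form vs. explicitly suffixed form.
vfloat32m1_t a = vfslide1down(src, value, vl);
vfloat32m1_t b = vfslide1down_vf_f32m1(src, value, vl);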

Five file diffs suppressed because they are too large.


@@ -0,0 +1,296 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
// ASM-NOT: warning
#include <riscv_vector.h>
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i32(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i64(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value,
size_t vl) {
return vfslide1down_vf_f32mf2(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32.i32(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32.i64(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value,
size_t vl) {
return vfslide1down_vf_f32m1(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32.i32(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32.i64(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value,
size_t vl) {
return vfslide1down_vf_f32m2(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32.i32(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32.i64(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value,
size_t vl) {
return vfslide1down_vf_f32m4(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32.i32(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32.i64(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value,
size_t vl) {
return vfslide1down_vf_f32m8(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64.i32(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64.i64(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value,
size_t vl) {
return vfslide1down_vf_f64m1(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64.i32(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64.i64(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value,
size_t vl) {
return vfslide1down_vf_f64m2(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64.i32(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64.i64(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value,
size_t vl) {
return vfslide1down_vf_f64m4(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64.i32(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64.i64(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value,
size_t vl) {
return vfslide1down_vf_f64m8(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask,
vfloat32mf2_t maskedoff,
vfloat32mf2_t src, float value,
size_t vl) {
return vfslide1down_vf_f32mf2_m(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask,
vfloat32m1_t maskedoff,
vfloat32m1_t src, float value,
size_t vl) {
return vfslide1down_vf_f32m1_m(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask,
vfloat32m2_t maskedoff,
vfloat32m2_t src, float value,
size_t vl) {
return vfslide1down_vf_f32m2_m(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
vfloat32m4_t src, float value,
size_t vl) {
return vfslide1down_vf_f32m4_m(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
vfloat32m8_t src, float value,
size_t vl) {
return vfslide1down_vf_f32m8_m(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask,
vfloat64m1_t maskedoff,
vfloat64m1_t src, double value,
size_t vl) {
return vfslide1down_vf_f64m1_m(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask,
vfloat64m2_t maskedoff,
vfloat64m2_t src, double value,
size_t vl) {
return vfslide1down_vf_f64m2_m(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask,
vfloat64m4_t maskedoff,
vfloat64m4_t src, double value,
size_t vl) {
return vfslide1down_vf_f64m4_m(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
vfloat64m8_t src, double value,
size_t vl) {
return vfslide1down_vf_f64m8_m(mask, maskedoff, src, value, vl);
}


@@ -0,0 +1,291 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
// ASM-NOT: warning
#include <riscv_vector.h>
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i32(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i64(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value,
size_t vl) {
return vfslide1up_vf_f32mf2(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32.i32(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32.i64(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value,
size_t vl) {
return vfslide1up_vf_f32m1(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32.i32(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32.i64(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value,
size_t vl) {
return vfslide1up_vf_f32m2(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32.i32(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32.i64(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value,
size_t vl) {
return vfslide1up_vf_f32m4(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32.i32(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32.i64(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value,
size_t vl) {
return vfslide1up_vf_f32m8(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64.i32(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64.i64(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value,
size_t vl) {
return vfslide1up_vf_f64m1(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64.i32(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64.i64(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value,
size_t vl) {
return vfslide1up_vf_f64m2(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64.i32(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64.i64(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value,
size_t vl) {
return vfslide1up_vf_f64m4(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64.i32(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64.i64(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value,
size_t vl) {
return vfslide1up_vf_f64m8(src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask,
vfloat32mf2_t maskedoff,
vfloat32mf2_t src, float value,
size_t vl) {
return vfslide1up_vf_f32mf2_m(mask, maskedoff, src, value, vl);
}
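// The masked (_m) variants, starting above, additionally take a mask and a
// maskedoff vector. A hypothetical scalar model of the semantics these checks
// assume (a sketch only, not part of the test): active lanes receive the
// slide result, inactive lanes keep the corresponding maskedoff element.
static void vfslide1up_masked_ref_f32(float *dst, const _Bool *mask,
                                      const float *maskedoff, const float *src,
                                      float value, size_t vl) {
  for (size_t i = 0; i < vl; ++i) {
    float slid = (i == 0) ? value : src[i - 1]; // unmasked slide result
    dst[i] = mask[i] ? slid : maskedoff[i];     // inactive lanes take maskedoff
  }
}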
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
vfloat32m1_t src, float value,
size_t vl) {
return vfslide1up_vf_f32m1_m(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
vfloat32m2_t src, float value,
size_t vl) {
return vfslide1up_vf_f32m2_m(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
vfloat32m4_t src, float value,
size_t vl) {
return vfslide1up_vf_f32m4_m(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
vfloat32m8_t src, float value,
size_t vl) {
return vfslide1up_vf_f32m8_m(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
vfloat64m1_t src, double value,
size_t vl) {
return vfslide1up_vf_f64m1_m(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
vfloat64m2_t src, double value,
size_t vl) {
return vfslide1up_vf_f64m2_m(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
vfloat64m4_t src, double value,
size_t vl) {
return vfslide1up_vf_f64m4_m(mask, maskedoff, src, value, vl);
}
// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
vfloat64m8_t src, double value,
size_t vl) {
return vfslide1up_vf_f64m8_m(mask, maskedoff, src, value, vl);
}
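// A hedged usage sketch (hypothetical, not part of this test): vfslide1up is
// handy for stencil-style code. Assuming the vfsub_vv_f32m1 intrinsic from
// the same intrinsic set, this computes adjacent differences in one pass,
// with `prev` supplying the element that precedes the current vector chunk.
vfloat32m1_t adjacent_diff_f32m1(vfloat32m1_t a, float prev, size_t vl) {
  vfloat32m1_t shifted = vfslide1up_vf_f32m1(a, prev, vl); // [prev, a0, a1, ...]
  return vfsub_vv_f32m1(a, shifted, vl);                   // a[i] - a[i-1]
}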

(5 additional file diffs suppressed because they are too large.)