// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
#include <riscv_vector.h>
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i64(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value,
                                        size_t vl) {
  return vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32.i64(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value,
                                      size_t vl) {
  return vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32.i64(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value,
                                      size_t vl) {
  return vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32.i64(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value,
                                      size_t vl) {
  return vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32.i64(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value,
                                      size_t vl) {
  return vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64.i64(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value,
                                      size_t vl) {
  return vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64.i64(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value,
                                      size_t vl) {
  return vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64.i64(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value,
                                      size_t vl) {
  return vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64.i64(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value,
                                      size_t vl) {
  return vfslide1up(src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask,
                                          vfloat32mf2_t maskedoff,
                                          vfloat32mf2_t src, float value,
                                          size_t vl) {
  return vfslide1up(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
                                        vfloat32m1_t src, float value,
                                        size_t vl) {
  return vfslide1up(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
                                        vfloat32m2_t src, float value,
                                        size_t vl) {
  return vfslide1up(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
                                        vfloat32m4_t src, float value,
                                        size_t vl) {
  return vfslide1up(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
                                        vfloat32m8_t src, float value,
                                        size_t vl) {
  return vfslide1up(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
                                        vfloat64m1_t src, double value,
                                        size_t vl) {
  return vfslide1up(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
                                        vfloat64m2_t src, double value,
                                        size_t vl) {
  return vfslide1up(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
                                        vfloat64m4_t src, double value,
                                        size_t vl) {
  return vfslide1up(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                        vfloat64m8_t src, double value,
                                        size_t vl) {
  return vfslide1up(mask, maskedoff, src, value, vl);
}