// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
#include <riscv_vector.h>
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
  return vasub(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
  return vasubu(op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
  return vasub(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
  return vasub(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
  return vasub(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
  return vasub(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
  return vasub(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
  return vasub(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
  return vasub(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
  return vasub(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
  return vasub(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
  return vasub(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
  return vasub(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
  return vasub(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
  return vasub(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
  return vasub(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
  return vasub(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
  return vasub(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
  return vasub(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
  return vasub(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
  return vasub(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
  return vasub(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
  return vasub(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vasub_vx_i16m2_m ( vbool8_t mask , vint16m2_t maskedoff , vint16m2_t op1 , int16_t op2 , size_t vl ) {
return vasub ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vasub_vv_i16m4_m ( vbool4_t mask , vint16m4_t maskedoff , vint16m4_t op1 , vint16m4_t op2 , size_t vl ) {
return vasub ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vasub_vx_i16m4_m ( vbool4_t mask , vint16m4_t maskedoff , vint16m4_t op1 , int16_t op2 , size_t vl ) {
return vasub ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vasub_vv_i16m8_m ( vbool2_t mask , vint16m8_t maskedoff , vint16m8_t op1 , vint16m8_t op2 , size_t vl ) {
return vasub ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vasub_vx_i16m8_m ( vbool2_t mask , vint16m8_t maskedoff , vint16m8_t op1 , int16_t op2 , size_t vl ) {
return vasub ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vv_i32mf2_m ( vbool64_t mask , vint32mf2_t maskedoff , vint32mf2_t op1 , vint32mf2_t op2 , size_t vl ) {
return vasub ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vx_i32mf2_m ( vbool64_t mask , vint32mf2_t maskedoff , vint32mf2_t op1 , int32_t op2 , size_t vl ) {
return vasub ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vasub_vv_i32m1_m ( vbool32_t mask , vint32m1_t maskedoff , vint32m1_t op1 , vint32m1_t op2 , size_t vl ) {
return vasub ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vasub_vx_i32m1_m ( vbool32_t mask , vint32m1_t maskedoff , vint32m1_t op1 , int32_t op2 , size_t vl ) {
return vasub ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vasub_vv_i32m2_m ( vbool16_t mask , vint32m2_t maskedoff , vint32m2_t op1 , vint32m2_t op2 , size_t vl ) {
return vasub ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vasub_vx_i32m2_m ( vbool16_t mask , vint32m2_t maskedoff , vint32m2_t op1 , int32_t op2 , size_t vl ) {
return vasub ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vasub_vv_i32m4_m ( vbool8_t mask , vint32m4_t maskedoff , vint32m4_t op1 , vint32m4_t op2 , size_t vl ) {
return vasub ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vasub_vx_i32m4_m ( vbool8_t mask , vint32m4_t maskedoff , vint32m4_t op1 , int32_t op2 , size_t vl ) {
return vasub ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vasub_vv_i32m8_m ( vbool4_t mask , vint32m8_t maskedoff , vint32m8_t op1 , vint32m8_t op2 , size_t vl ) {
return vasub ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vasub_vx_i32m8_m ( vbool4_t mask , vint32m8_t maskedoff , vint32m8_t op1 , int32_t op2 , size_t vl ) {
return vasub ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vasub_vv_i64m1_m ( vbool64_t mask , vint64m1_t maskedoff , vint64m1_t op1 , vint64m1_t op2 , size_t vl ) {
return vasub ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vasub_vx_i64m1_m ( vbool64_t mask , vint64m1_t maskedoff , vint64m1_t op1 , int64_t op2 , size_t vl ) {
return vasub ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vasub_vv_i64m2_m ( vbool32_t mask , vint64m2_t maskedoff , vint64m2_t op1 , vint64m2_t op2 , size_t vl ) {
return vasub ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vasub_vx_i64m2_m ( vbool32_t mask , vint64m2_t maskedoff , vint64m2_t op1 , int64_t op2 , size_t vl ) {
return vasub ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vasub_vv_i64m4_m ( vbool16_t mask , vint64m4_t maskedoff , vint64m4_t op1 , vint64m4_t op2 , size_t vl ) {
return vasub ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vasub_vx_i64m4_m ( vbool16_t mask , vint64m4_t maskedoff , vint64m4_t op1 , int64_t op2 , size_t vl ) {
return vasub ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vasub_vv_i64m8_m ( vbool8_t mask , vint64m8_t maskedoff , vint64m8_t op1 , vint64m8_t op2 , size_t vl ) {
return vasub ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vasub_vx_i64m8_m ( vbool8_t mask , vint64m8_t maskedoff , vint64m8_t op1 , int64_t op2 , size_t vl ) {
return vasub ( mask , maskedoff , op1 , op2 , vl ) ;
}
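// The masked tests below repeat the same pattern for the unsigned averaging
// subtract, vasubu, across u8..u64 element types and every LMUL; the vx forms
// take an unsigned scalar operand, and the generated IR is expected to call
// the @llvm.riscv.vasubu.mask.* intrinsics checked above each function.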
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vasubu_vv_u8mf8_m ( vbool64_t mask , vuint8mf8_t maskedoff , vuint8mf8_t op1 , vuint8mf8_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vasubu_vx_u8mf8_m ( vbool64_t mask , vuint8mf8_t maskedoff , vuint8mf8_t op1 , uint8_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vasubu_vv_u8mf4_m ( vbool32_t mask , vuint8mf4_t maskedoff , vuint8mf4_t op1 , vuint8mf4_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vasubu_vx_u8mf4_m ( vbool32_t mask , vuint8mf4_t maskedoff , vuint8mf4_t op1 , uint8_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vasubu_vv_u8mf2_m ( vbool16_t mask , vuint8mf2_t maskedoff , vuint8mf2_t op1 , vuint8mf2_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vasubu_vx_u8mf2_m ( vbool16_t mask , vuint8mf2_t maskedoff , vuint8mf2_t op1 , uint8_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vasubu_vv_u8m1_m ( vbool8_t mask , vuint8m1_t maskedoff , vuint8m1_t op1 , vuint8m1_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vasubu_vx_u8m1_m ( vbool8_t mask , vuint8m1_t maskedoff , vuint8m1_t op1 , uint8_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vasubu_vv_u8m2_m ( vbool4_t mask , vuint8m2_t maskedoff , vuint8m2_t op1 , vuint8m2_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vasubu_vx_u8m2_m ( vbool4_t mask , vuint8m2_t maskedoff , vuint8m2_t op1 , uint8_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vasubu_vv_u8m4_m ( vbool2_t mask , vuint8m4_t maskedoff , vuint8m4_t op1 , vuint8m4_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vasubu_vx_u8m4_m ( vbool2_t mask , vuint8m4_t maskedoff , vuint8m4_t op1 , uint8_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vasubu_vv_u8m8_m ( vbool1_t mask , vuint8m8_t maskedoff , vuint8m8_t op1 , vuint8m8_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vasubu_vx_u8m8_m ( vbool1_t mask , vuint8m8_t maskedoff , vuint8m8_t op1 , uint8_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vasubu_vv_u16mf4_m ( vbool64_t mask , vuint16mf4_t maskedoff , vuint16mf4_t op1 , vuint16mf4_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vasubu_vx_u16mf4_m ( vbool64_t mask , vuint16mf4_t maskedoff , vuint16mf4_t op1 , uint16_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vasubu_vv_u16mf2_m ( vbool32_t mask , vuint16mf2_t maskedoff , vuint16mf2_t op1 , vuint16mf2_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vasubu_vx_u16mf2_m ( vbool32_t mask , vuint16mf2_t maskedoff , vuint16mf2_t op1 , uint16_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vasubu_vv_u16m1_m ( vbool16_t mask , vuint16m1_t maskedoff , vuint16m1_t op1 , vuint16m1_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vasubu_vx_u16m1_m ( vbool16_t mask , vuint16m1_t maskedoff , vuint16m1_t op1 , uint16_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vasubu_vv_u16m2_m ( vbool8_t mask , vuint16m2_t maskedoff , vuint16m2_t op1 , vuint16m2_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vasubu_vx_u16m2_m ( vbool8_t mask , vuint16m2_t maskedoff , vuint16m2_t op1 , uint16_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vasubu_vv_u16m4_m ( vbool4_t mask , vuint16m4_t maskedoff , vuint16m4_t op1 , vuint16m4_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vasubu_vx_u16m4_m ( vbool4_t mask , vuint16m4_t maskedoff , vuint16m4_t op1 , uint16_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vasubu_vv_u16m8_m ( vbool2_t mask , vuint16m8_t maskedoff , vuint16m8_t op1 , vuint16m8_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vasubu_vx_u16m8_m ( vbool2_t mask , vuint16m8_t maskedoff , vuint16m8_t op1 , uint16_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vv_u32mf2_m ( vbool64_t mask , vuint32mf2_t maskedoff , vuint32mf2_t op1 , vuint32mf2_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vx_u32mf2_m ( vbool64_t mask , vuint32mf2_t maskedoff , vuint32mf2_t op1 , uint32_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vasubu_vv_u32m1_m ( vbool32_t mask , vuint32m1_t maskedoff , vuint32m1_t op1 , vuint32m1_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vasubu_vx_u32m1_m ( vbool32_t mask , vuint32m1_t maskedoff , vuint32m1_t op1 , uint32_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vasubu_vv_u32m2_m ( vbool16_t mask , vuint32m2_t maskedoff , vuint32m2_t op1 , vuint32m2_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vasubu_vx_u32m2_m ( vbool16_t mask , vuint32m2_t maskedoff , vuint32m2_t op1 , uint32_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vasubu_vv_u32m4_m ( vbool8_t mask , vuint32m4_t maskedoff , vuint32m4_t op1 , vuint32m4_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vasubu_vx_u32m4_m ( vbool8_t mask , vuint32m4_t maskedoff , vuint32m4_t op1 , uint32_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vasubu_vv_u32m8_m ( vbool4_t mask , vuint32m8_t maskedoff , vuint32m8_t op1 , vuint32m8_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vasubu_vx_u32m8_m ( vbool4_t mask , vuint32m8_t maskedoff , vuint32m8_t op1 , uint32_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vasubu_vv_u64m1_m ( vbool64_t mask , vuint64m1_t maskedoff , vuint64m1_t op1 , vuint64m1_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vasubu_vx_u64m1_m ( vbool64_t mask , vuint64m1_t maskedoff , vuint64m1_t op1 , uint64_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vasubu_vv_u64m2_m ( vbool32_t mask , vuint64m2_t maskedoff , vuint64m2_t op1 , vuint64m2_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vasubu_vx_u64m2_m ( vbool32_t mask , vuint64m2_t maskedoff , vuint64m2_t op1 , uint64_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vasubu_vv_u64m4_m ( vbool16_t mask , vuint64m4_t maskedoff , vuint64m4_t op1 , vuint64m4_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vasubu_vx_u64m4_m ( vbool16_t mask , vuint64m4_t maskedoff , vuint64m4_t op1 , uint64_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vasubu_vv_u64m8_m ( vbool8_t mask , vuint64m8_t maskedoff , vuint64m8_t op1 , vuint64m8_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vasubu_vx_u64m8_m ( vbool8_t mask , vuint64m8_t maskedoff , vuint64m8_t op1 , uint64_t op2 , size_t vl ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl ) ;
}
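// The _mt tests below exercise overloads that take an explicit tail-policy
// argument. Each call passes VE_TAIL_AGNOSTIC (leaving the trailing size_t ta
// parameter unused), which is expected to lower to the masked intrinsic with a
// tail-policy operand of i64 1, as the CHECK lines show. A minimal sketch of
// such a call, assuming the VE_TAIL_AGNOSTIC macro provided with the vector
// header:
//   vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);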
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vv_i8mf8_mt ( vbool64_t mask , vint8mf8_t maskedoff , vint8mf8_t op1 , vint8mf8_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vasub_vx_i8mf8_mt ( vbool64_t mask , vint8mf8_t maskedoff , vint8mf8_t op1 , int8_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vasub_vv_i8mf4_mt ( vbool32_t mask , vint8mf4_t maskedoff , vint8mf4_t op1 , vint8mf4_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vasub_vx_i8mf4_mt ( vbool32_t mask , vint8mf4_t maskedoff , vint8mf4_t op1 , int8_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vasub_vv_i8mf2_mt ( vbool16_t mask , vint8mf2_t maskedoff , vint8mf2_t op1 , vint8mf2_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vasub_vx_i8mf2_mt ( vbool16_t mask , vint8mf2_t maskedoff , vint8mf2_t op1 , int8_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vasub_vv_i8m1_mt ( vbool8_t mask , vint8m1_t maskedoff , vint8m1_t op1 , vint8m1_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vasub_vx_i8m1_mt ( vbool8_t mask , vint8m1_t maskedoff , vint8m1_t op1 , int8_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m2_mt(
2021-03-31 01:12:07 +08:00
// CHECK-RV64-NEXT: entry:
2021-09-06 21:27:15 +08:00
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2021-03-31 01:12:07 +08:00
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
[RISCV] Define _m intrinsics as builtins, instead of macros.
In the original design, we levarage _mt intrinsics to define macros for
_m intrinsics. Such as,
```
__builtin_rvv_vadd_vv_i8m1_mt((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4), (size_t)VE_TAIL_AGNOSTIC)
```
However, we could not define generic interface for mask intrinsics any
more due to clang_builtin_alias only accepts clang builtins as its
argument.
In the example,
```
__rvv_overloaded
__attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i8m1_mt)))
vint8m1_t vadd(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t
op3, size_t op4, size_t op5);
```
op5 is the tail policy argument. When users want to use vadd generic
interface for masked vector add, they need to specify tail policy in the
previous design. In this patch, we define _m intrinsics as clang
builtins to solve the problem.
Differential Revision: https://reviews.llvm.org/D110684
2021-09-29 15:34:57 +08:00
vint8m2_t test_vasub_vv_i8m2_mt ( vbool4_t mask , vint8m2_t maskedoff , vint8m2_t op1 , vint8m2_t op2 , size_t vl , size_t ta ) {
2021-09-06 21:27:15 +08:00
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
2021-03-31 01:12:07 +08:00
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vasub_vx_i8m2_mt ( vbool4_t mask , vint8m2_t maskedoff , vint8m2_t op1 , int8_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vasub_vv_i8m4_mt ( vbool2_t mask , vint8m4_t maskedoff , vint8m4_t op1 , vint8m4_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vasub_vx_i8m4_mt ( vbool2_t mask , vint8m4_t maskedoff , vint8m4_t op1 , int8_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i8m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vasub_vv_i8m8_mt ( vbool1_t mask , vint8m8_t maskedoff , vint8m8_t op1 , vint8m8_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i8m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vasub_vx_i8m8_mt ( vbool1_t mask , vint8m8_t maskedoff , vint8m8_t op1 , int8_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vasub_vv_i16mf4_mt ( vbool64_t mask , vint16mf4_t maskedoff , vint16mf4_t op1 , vint16mf4_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vasub_vx_i16mf4_mt ( vbool64_t mask , vint16mf4_t maskedoff , vint16mf4_t op1 , int16_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vasub_vv_i16mf2_mt ( vbool32_t mask , vint16mf2_t maskedoff , vint16mf2_t op1 , vint16mf2_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vasub_vx_i16mf2_mt ( vbool32_t mask , vint16mf2_t maskedoff , vint16mf2_t op1 , int16_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vasub_vv_i16m1_mt ( vbool16_t mask , vint16m1_t maskedoff , vint16m1_t op1 , vint16m1_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vasub_vx_i16m1_mt ( vbool16_t mask , vint16m1_t maskedoff , vint16m1_t op1 , int16_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vasub_vv_i16m2_mt ( vbool8_t mask , vint16m2_t maskedoff , vint16m2_t op1 , vint16m2_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vasub_vx_i16m2_mt ( vbool8_t mask , vint16m2_t maskedoff , vint16m2_t op1 , int16_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vasub_vv_i16m4_mt ( vbool4_t mask , vint16m4_t maskedoff , vint16m4_t op1 , vint16m4_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vasub_vx_i16m4_mt ( vbool4_t mask , vint16m4_t maskedoff , vint16m4_t op1 , int16_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i16m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vasub_vv_i16m8_mt ( vbool2_t mask , vint16m8_t maskedoff , vint16m8_t op1 , vint16m8_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i16m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vasub_vx_i16m8_mt ( vbool2_t mask , vint16m8_t maskedoff , vint16m8_t op1 , int16_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vv_i32mf2_mt ( vbool64_t mask , vint32mf2_t maskedoff , vint32mf2_t op1 , vint32mf2_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vasub_vx_i32mf2_mt ( vbool64_t mask , vint32mf2_t maskedoff , vint32mf2_t op1 , int32_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vasub_vv_i32m1_mt ( vbool32_t mask , vint32m1_t maskedoff , vint32m1_t op1 , vint32m1_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vasub_vx_i32m1_mt ( vbool32_t mask , vint32m1_t maskedoff , vint32m1_t op1 , int32_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vasub_vv_i32m2_mt ( vbool16_t mask , vint32m2_t maskedoff , vint32m2_t op1 , vint32m2_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vasub_vx_i32m2_mt ( vbool16_t mask , vint32m2_t maskedoff , vint32m2_t op1 , int32_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vasub_vv_i32m4_mt ( vbool8_t mask , vint32m4_t maskedoff , vint32m4_t op1 , vint32m4_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vasub_vx_i32m4_mt ( vbool8_t mask , vint32m4_t maskedoff , vint32m4_t op1 , int32_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i32m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vasub_vv_i32m8_mt ( vbool4_t mask , vint32m8_t maskedoff , vint32m8_t op1 , vint32m8_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i32m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vasub_vx_i32m8_mt ( vbool4_t mask , vint32m8_t maskedoff , vint32m8_t op1 , int32_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vasub_vv_i64m1_mt ( vbool64_t mask , vint64m1_t maskedoff , vint64m1_t op1 , vint64m1_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vasub_vx_i64m1_mt ( vbool64_t mask , vint64m1_t maskedoff , vint64m1_t op1 , int64_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vasub_vv_i64m2_mt ( vbool32_t mask , vint64m2_t maskedoff , vint64m2_t op1 , vint64m2_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vasub_vx_i64m2_mt ( vbool32_t mask , vint64m2_t maskedoff , vint64m2_t op1 , int64_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vasub_vv_i64m4_mt ( vbool16_t mask , vint64m4_t maskedoff , vint64m4_t op1 , vint64m4_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vasub_vx_i64m4_mt ( vbool16_t mask , vint64m4_t maskedoff , vint64m4_t op1 , int64_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vv_i64m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vasub_vv_i64m8_mt ( vbool8_t mask , vint64m8_t maskedoff , vint64m8_t op1 , vint64m8_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasub_vx_i64m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vasub_vx_i64m8_mt ( vbool8_t mask , vint64m8_t maskedoff , vint64m8_t op1 , int64_t op2 , size_t vl , size_t ta ) {
return vasub ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vasubu_vv_u8mf8_mt ( vbool64_t mask , vuint8mf8_t maskedoff , vuint8mf8_t op1 , vuint8mf8_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vasubu_vx_u8mf8_mt ( vbool64_t mask , vuint8mf8_t maskedoff , vuint8mf8_t op1 , uint8_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vasubu_vv_u8mf4_mt ( vbool32_t mask , vuint8mf4_t maskedoff , vuint8mf4_t op1 , vuint8mf4_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vasubu_vx_u8mf4_mt ( vbool32_t mask , vuint8mf4_t maskedoff , vuint8mf4_t op1 , uint8_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vasubu_vv_u8mf2_mt ( vbool16_t mask , vuint8mf2_t maskedoff , vuint8mf2_t op1 , vuint8mf2_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vasubu_vx_u8mf2_mt ( vbool16_t mask , vuint8mf2_t maskedoff , vuint8mf2_t op1 , uint8_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2021-03-31 01:12:07 +08:00
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
[RISCV] Define _m intrinsics as builtins, instead of macros.
In the original design, we levarage _mt intrinsics to define macros for
_m intrinsics. Such as,
```
__builtin_rvv_vadd_vv_i8m1_mt((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4), (size_t)VE_TAIL_AGNOSTIC)
```
However, we could not define generic interface for mask intrinsics any
more due to clang_builtin_alias only accepts clang builtins as its
argument.
In the example,
```
__rvv_overloaded
__attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i8m1_mt)))
vint8m1_t vadd(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t
op3, size_t op4, size_t op5);
```
op5 is the tail policy argument. When users want to use vadd generic
interface for masked vector add, they need to specify tail policy in the
previous design. In this patch, we define _m intrinsics as clang
builtins to solve the problem.
Differential Revision: https://reviews.llvm.org/D110684
2021-09-29 15:34:57 +08:00
vuint8m1_t test_vasubu_vv_u8m1_mt ( vbool8_t mask , vuint8m1_t maskedoff , vuint8m1_t op1 , vuint8m1_t op2 , size_t vl , size_t ta ) {
2021-09-06 21:27:15 +08:00
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
2021-03-31 01:12:07 +08:00
}
[RISCV] Define _m intrinsics as builtins, instead of macros.
In the original design, we levarage _mt intrinsics to define macros for
_m intrinsics. Such as,
```
__builtin_rvv_vadd_vv_i8m1_mt((vbool8_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vint8m1_t)(op3), (size_t)(op4), (size_t)VE_TAIL_AGNOSTIC)
```
However, we could not define generic interface for mask intrinsics any
more due to clang_builtin_alias only accepts clang builtins as its
argument.
In the example,
```
__rvv_overloaded
__attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i8m1_mt)))
vint8m1_t vadd(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t
op3, size_t op4, size_t op5);
```
op5 is the tail policy argument. When users want to use vadd generic
interface for masked vector add, they need to specify tail policy in the
previous design. In this patch, we define _m intrinsics as clang
builtins to solve the problem.
Differential Revision: https://reviews.llvm.org/D110684
2021-09-29 15:34:57 +08:00
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vasubu_vx_u8m1_mt ( vbool8_t mask , vuint8m1_t maskedoff , vuint8m1_t op1 , uint8_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vasubu_vv_u8m2_mt ( vbool4_t mask , vuint8m2_t maskedoff , vuint8m2_t op1 , vuint8m2_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vasubu_vx_u8m2_mt ( vbool4_t mask , vuint8m2_t maskedoff , vuint8m2_t op1 , uint8_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vasubu_vv_u8m4_mt ( vbool2_t mask , vuint8m4_t maskedoff , vuint8m4_t op1 , vuint8m4_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vasubu_vx_u8m4_mt ( vbool2_t mask , vuint8m4_t maskedoff , vuint8m4_t op1 , uint8_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vasubu_vv_u8m8_mt ( vbool1_t mask , vuint8m8_t maskedoff , vuint8m8_t op1 , vuint8m8_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vasubu_vx_u8m8_mt ( vbool1_t mask , vuint8m8_t maskedoff , vuint8m8_t op1 , uint8_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vasubu_vv_u16mf4_mt ( vbool64_t mask , vuint16mf4_t maskedoff , vuint16mf4_t op1 , vuint16mf4_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vasubu_vx_u16mf4_mt ( vbool64_t mask , vuint16mf4_t maskedoff , vuint16mf4_t op1 , uint16_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vasubu_vv_u16mf2_mt ( vbool32_t mask , vuint16mf2_t maskedoff , vuint16mf2_t op1 , vuint16mf2_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vasubu_vx_u16mf2_mt ( vbool32_t mask , vuint16mf2_t maskedoff , vuint16mf2_t op1 , uint16_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vasubu_vv_u16m1_mt ( vbool16_t mask , vuint16m1_t maskedoff , vuint16m1_t op1 , vuint16m1_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vasubu_vx_u16m1_mt ( vbool16_t mask , vuint16m1_t maskedoff , vuint16m1_t op1 , uint16_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vasubu_vv_u16m2_mt ( vbool8_t mask , vuint16m2_t maskedoff , vuint16m2_t op1 , vuint16m2_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vasubu_vx_u16m2_mt ( vbool8_t mask , vuint16m2_t maskedoff , vuint16m2_t op1 , uint16_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vasubu_vv_u16m4_mt ( vbool4_t mask , vuint16m4_t maskedoff , vuint16m4_t op1 , vuint16m4_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vasubu_vx_u16m4_mt ( vbool4_t mask , vuint16m4_t maskedoff , vuint16m4_t op1 , uint16_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vasubu_vv_u16m8_mt ( vbool2_t mask , vuint16m8_t maskedoff , vuint16m8_t op1 , vuint16m8_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vasubu_vx_u16m8_mt ( vbool2_t mask , vuint16m8_t maskedoff , vuint16m8_t op1 , uint16_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vv_u32mf2_mt ( vbool64_t mask , vuint32mf2_t maskedoff , vuint32mf2_t op1 , vuint32mf2_t op2 , size_t vl , size_t ta ) {
return vasubu ( mask , maskedoff , op1 , op2 , vl , VE_TAIL_AGNOSTIC ) ;
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_mt(
2021-03-31 01:12:07 +08:00
// CHECK-RV64-NEXT: entry:
2021-09-06 21:27:15 +08:00
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2021-03-31 01:12:07 +08:00
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vasubu_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) {
  return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vasubu_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) {
  return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vasubu_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) {
  return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vasubu_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) {
  return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vasubu_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, size_t ta) {
  return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vasubu_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) {
  return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vasubu_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, size_t ta) {
  return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vasubu_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, size_t ta) {
  return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vasubu_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, size_t ta) {
  return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
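// Note: the _vv tests take a vector second operand, while the _vx tests take a scalar
// second operand (uint32_t or uint64_t here), which appears as the plain i32 or i64
// operand in the corresponding @llvm.riscv.vasubu.mask.* intrinsic.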
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vasubu_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, size_t ta) {
  return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vasubu_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, size_t ta) {
  return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vasubu_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, size_t ta) {
  return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vasubu_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, size_t ta) {
  return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vasubu_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, size_t ta) {
  return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vasubu_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, size_t ta) {
  return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vasubu_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, size_t ta) {
  return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vasubu_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, size_t ta) {
  return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}