[RISCV] [NFC] add some tests for overloaded intrinsics of FP16
Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D122564

parent 7df2eba7fa
commit d9ef6ad05f
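A note for context (not part of the patch): the overloaded intrinsic names drop the type suffix and resolve on their argument types. That is why the load tests added here are all masked _m variants — the mask and maskedoff operands give overload resolution a vector type to dispatch on — while the store tests come in both unmasked and masked forms. A minimal sketch of a call site, assuming the +zfh and +experimental-zvfh feature flags from the RUN lines below; the helper name roundtrip_f16m1 and the suffixed load intrinsic used for the unmasked case are illustrative assumptions, not taken from this diff:

    #include <riscv_vector.h>

    // Illustrative helper: load vl halves with the explicitly suffixed
    // intrinsic (an unmasked load has no vector argument to dispatch on),
    // then store them back through the overloaded vse16.
    void roundtrip_f16m1(_Float16 *dst, const _Float16 *src, size_t vl) {
      vfloat16m1_t v = vle16_v_f16m1(src, vl); // suffixed, unmasked load
      vse16(dst, v, vl);                       // overloaded store
    }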
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \
-// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v -target-feature +zfh \
+// RUN: -target-feature +experimental-zvfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
@@ -534,3 +534,63 @@ vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const
 vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl) {
   return vle64(mask, maskedoff, base, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vle16_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vle16_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t vl) {
+  return vle16(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vle16_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t vl) {
+  return vle16(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vle16_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t vl) {
+  return vle16(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vle16_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t vl) {
+  return vle16(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16_v_f16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t test_vle16_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t vl) {
+  return vle16(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16_v_f16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
+//
+vfloat16m8_t test_vle16_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t vl) {
+  return vle16(mask, maskedoff, base, vl);
+}
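As a side note on the signatures above: the vboolN_t mask type pairs with each vector shape by the SEW/LMUL ratio. For 16-bit elements, 16 / (1/4) = 64 gives vbool64_t for f16mf4, 16 / 1 = 16 gives vbool16_t for f16m1, and 16 / 8 = 2 gives vbool2_t for f16m8 — matching the parameter lists in this hunk.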
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \
-// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v -target-feature +zfh \
+// RUN: -target-feature +experimental-zvfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
@@ -640,3 +640,63 @@ vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
                                    size_t vl) {
   return vlse64(mask, maskedoff, base, bstride, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vlse.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vlse16_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vlse.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vlse16_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vlse.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vlse16_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vlse.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vlse16_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vlse.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t test_vlse16_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vlse.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
+//
+vfloat16m8_t test_vlse16_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16(mask, maskedoff, base, bstride, vl);
+}
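The vlse16 tests above cover the masked, strided overloaded load. A minimal usage sketch, assuming bstride is a byte stride and using the illustrative helper name load_every_other:

    #include <riscv_vector.h>

    // Illustrative: read every second _Float16 from a packed buffer;
    // 2 * sizeof(_Float16) = 4 bytes between consecutive loaded elements.
    vfloat16m1_t load_every_other(vbool16_t mask, vfloat16m1_t maskedoff,
                                  const _Float16 *base, size_t vl) {
      return vlse16(mask, maskedoff, base, 4, vl);
    }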
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \
-// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v -target-feature +zfh \
+// RUN: -target-feature +experimental-zvfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
@@ -1134,3 +1134,123 @@ void test_vsm_v_b32(uint8_t *base, vbool32_t value, size_t vl) {
 void test_vsm_v_b64(uint8_t *base, vbool64_t value, size_t vl) {
   return vsm(base, value, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1f16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_f16mf4(_Float16 *base, vfloat16mf4_t value, size_t vl) {
+  return vse16(base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2f16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_f16mf2(_Float16 *base, vfloat16mf2_t value, size_t vl) {
+  return vse16(base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4f16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_f16m1(_Float16 *base, vfloat16m1_t value, size_t vl) {
+  return vse16(base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8f16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_f16m2(_Float16 *base, vfloat16m2_t value, size_t vl) {
+  return vse16(base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16f16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_f16m4(_Float16 *base, vfloat16m4_t value, size_t vl) {
+  return vse16(base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32f16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_f16m8(_Float16 *base, vfloat16m8_t value, size_t vl) {
+  return vse16(base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1f16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t value, size_t vl) {
+  return vse16(mask, base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2f16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t value, size_t vl) {
+  return vse16(mask, base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4f16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t value, size_t vl) {
+  return vse16(mask, base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8f16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t value, size_t vl) {
+  return vse16(mask, base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16f16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_f16m4_m(vbool4_t mask, _Float16 *base, vfloat16m4_t value, size_t vl) {
+  return vse16(mask, base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32f16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_f16m8_m(vbool2_t mask, _Float16 *base, vfloat16m8_t value, size_t vl) {
+  return vse16(mask, base, value, vl);
+}
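Unlike the loads, the vse16 tests above exercise both unmasked and masked overloaded stores: the value operand always carries the vector type, so overload resolution works either way. A small sketch with the illustrative helper name store_both:

    #include <riscv_vector.h>

    // Illustrative: both calls resolve through the overloaded vse16,
    // mirroring test_vse16_v_f16m2 and test_vse16_v_f16m2_m above.
    void store_both(vbool8_t mask, _Float16 *base, vfloat16m2_t value, size_t vl) {
      vse16(base, value, vl);       // unmasked store
      vse16(mask, base, value, vl); // masked store
    }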
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \
-// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v -target-feature +zfh \
+// RUN: -target-feature +experimental-zvfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
@@ -1170,3 +1170,123 @@ void test_vsse64_v_f64m8_m(vbool8_t mask, double *base, ptrdiff_t bstride,
                            vfloat64m8_t value, size_t vl) {
   return vsse64(mask, base, bstride, value, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1f16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) {
+  return vsse16(base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2f16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) {
+  return vsse16(base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4f16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) {
+  return vsse16(base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8f16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl) {
+  return vsse16(base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16f16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16m4(_Float16 *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) {
+  return vsse16(base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32f16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16m8(_Float16 *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl) {
+  return vsse16(base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1f16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) {
+  return vsse16(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2f16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) {
+  return vsse16(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4f16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) {
+  return vsse16(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8f16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl) {
+  return vsse16(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16f16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16m4_m(vbool4_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) {
+  return vsse16(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32f16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16m8_m(vbool2_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl) {
+  return vsse16(mask, base, bstride, value, vl);
+}
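Finally, a usage sketch for the strided overloaded store tested above, again assuming a byte stride; the helper name scatter_every_4th is illustrative:

    #include <riscv_vector.h>

    // Illustrative: write vl halves to every fourth _Float16 slot;
    // 4 * sizeof(_Float16) = 8 bytes between consecutive stored elements.
    void scatter_every_4th(_Float16 *base, vfloat16m1_t value, size_t vl) {
      vsse16(base, 8, value, vl);
    }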