[RISCV][VP] Add tests for "unmasked" VP loads
These aren't currently matched against unmasked vector load instructions. A patch to fix that will come later.
parent 5880c835bd
commit 1fc80ffc92
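For context, the new *_allones_mask tests below build an all-true mask with an insertelement/shufflevector splat, so codegen currently materializes the mask with vmset.m and still emits the masked load form, e.g. from the v4i8 test:

    vsetivli zero, 4, e8, mf4, ta, mu
    vmset.m v0
    vsetvli zero, a1, e8, mf4, ta, mu
    vle8.v v8, (a0), v0.t

Once the follow-up patch mentioned above lands, a plausible expectation (an assumption here, not output produced by this commit) is that the all-ones mask is folded away and the unmasked encoding is selected instead:

    vsetvli zero, a1, e8, mf4, ta, mu
    vle8.v v8, (a0)
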
@@ -28,6 +28,20 @@ define <4 x i8> @vpload_v4i8(<4 x i8>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
   ret <4 x i8> %load
 }
 
+define <4 x i8> @vpload_v4i8_allones_mask(<4 x i8>* %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vpload_v4i8_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vle8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %a = insertelement <4 x i1> undef, i1 true, i32 0
+  %b = shufflevector <4 x i1> %a, <4 x i1> poison, <4 x i32> zeroinitializer
+  %load = call <4 x i8> @llvm.vp.load.v4i8(<4 x i8>* %ptr, <4 x i1> %b, i32 %evl)
+  ret <4 x i8> %load
+}
+
 declare <8 x i8> @llvm.vp.load.v8i8(<8 x i8>*, <8 x i1>, i32)
 
 define <8 x i8> @vpload_v8i8(<8 x i8>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
@@ -76,6 +90,20 @@ define <8 x i16> @vpload_v8i16(<8 x i16>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
   ret <8 x i16> %load
 }
 
+define <8 x i16> @vpload_v8i16_allones_mask(<8 x i16>* %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vpload_v8i16_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %a = insertelement <8 x i1> undef, i1 true, i32 0
+  %b = shufflevector <8 x i1> %a, <8 x i1> poison, <8 x i32> zeroinitializer
+  %load = call <8 x i16> @llvm.vp.load.v8i16(<8 x i16>* %ptr, <8 x i1> %b, i32 %evl)
+  ret <8 x i16> %load
+}
+
 declare <2 x i32> @llvm.vp.load.v2i32(<2 x i32>*, <2 x i1>, i32)
 
 define <2 x i32> @vpload_v2i32(<2 x i32>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
@@ -112,6 +140,20 @@ define <8 x i32> @vpload_v8i32(<8 x i32>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
   ret <8 x i32> %load
 }
 
+define <8 x i32> @vpload_v8i32_allones_mask(<8 x i32>* %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vpload_v8i32_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %a = insertelement <8 x i1> undef, i1 true, i32 0
+  %b = shufflevector <8 x i1> %a, <8 x i1> poison, <8 x i32> zeroinitializer
+  %load = call <8 x i32> @llvm.vp.load.v8i32(<8 x i32>* %ptr, <8 x i1> %b, i32 %evl)
+  ret <8 x i32> %load
+}
+
 declare <2 x i64> @llvm.vp.load.v2i64(<2 x i64>*, <2 x i1>, i32)
 
 define <2 x i64> @vpload_v2i64(<2 x i64>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
@@ -136,6 +178,20 @@ define <4 x i64> @vpload_v4i64(<4 x i64>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
   ret <4 x i64> %load
 }
 
+define <4 x i64> @vpload_v4i64_allones_mask(<4 x i64>* %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vpload_v4i64_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT:    vle64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %a = insertelement <4 x i1> undef, i1 true, i32 0
+  %b = shufflevector <4 x i1> %a, <4 x i1> poison, <4 x i32> zeroinitializer
+  %load = call <4 x i64> @llvm.vp.load.v4i64(<4 x i64>* %ptr, <4 x i1> %b, i32 %evl)
+  ret <4 x i64> %load
+}
+
 declare <8 x i64> @llvm.vp.load.v8i64(<8 x i64>*, <8 x i1>, i32)
 
 define <8 x i64> @vpload_v8i64(<8 x i64>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
@@ -160,6 +216,20 @@ define <2 x half> @vpload_v2f16(<2 x half>* %ptr, <2 x i1> %m, i32 zeroext %evl)
   ret <2 x half> %load
 }
 
+define <2 x half> @vpload_v2f16_allones_mask(<2 x half>* %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vpload_v2f16_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %a = insertelement <2 x i1> undef, i1 true, i32 0
+  %b = shufflevector <2 x i1> %a, <2 x i1> poison, <2 x i32> zeroinitializer
+  %load = call <2 x half> @llvm.vp.load.v2f16(<2 x half>* %ptr, <2 x i1> %b, i32 %evl)
+  ret <2 x half> %load
+}
+
 declare <4 x half> @llvm.vp.load.v4f16(<4 x half>*, <4 x i1>, i32)
 
 define <4 x half> @vpload_v4f16(<4 x half>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
@@ -220,6 +290,20 @@ define <8 x float> @vpload_v8f32(<8 x float>* %ptr, <8 x i1> %m, i32 zeroext %ev
   ret <8 x float> %load
 }
 
+define <8 x float> @vpload_v8f32_allones_mask(<8 x float>* %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vpload_v8f32_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %a = insertelement <8 x i1> undef, i1 true, i32 0
+  %b = shufflevector <8 x i1> %a, <8 x i1> poison, <8 x i32> zeroinitializer
+  %load = call <8 x float> @llvm.vp.load.v8f32(<8 x float>* %ptr, <8 x i1> %b, i32 %evl)
+  ret <8 x float> %load
+}
+
 declare <2 x double> @llvm.vp.load.v2f64(<2 x double>*, <2 x i1>, i32)
 
 define <2 x double> @vpload_v2f64(<2 x double>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
@@ -244,6 +328,20 @@ define <4 x double> @vpload_v4f64(<4 x double>* %ptr, <4 x i1> %m, i32 zeroext %
   ret <4 x double> %load
 }
 
+define <4 x double> @vpload_v4f64_allones_mask(<4 x double>* %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vpload_v4f64_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT:    vle64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %a = insertelement <4 x i1> undef, i1 true, i32 0
+  %b = shufflevector <4 x i1> %a, <4 x i1> poison, <4 x i32> zeroinitializer
+  %load = call <4 x double> @llvm.vp.load.v4f64(<4 x double>* %ptr, <4 x i1> %b, i32 %evl)
+  ret <4 x double> %load
+}
+
 declare <8 x double> @llvm.vp.load.v8f64(<8 x double>*, <8 x i1>, i32)
 
 define <8 x double> @vpload_v8f64(<8 x double>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
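The remaining hunks update the scalable-vector tests. Note the extra vsetvli a2, zero, ... there: with scalable types the all-ones mask has to be materialized at VLMAX before the vmset.m. Under the same assumption as above, the follow-up would be expected to drop both that vsetvli and the vmset.m, e.g. for nxv1i8:

    vsetvli zero, a1, e8, mf8, ta, mu
    vle8.v v8, (a0)
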
@@ -16,6 +16,20 @@ define <vscale x 1 x i8> @vpload_nxv1i8(<vscale x 1 x i8>* %ptr, <vscale x 1 x i
   ret <vscale x 1 x i8> %load
 }
 
+define <vscale x 1 x i8> @vpload_nxv1i8_allones_mask(<vscale x 1 x i8>* %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vpload_nxv1i8_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8, mf8, ta, mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vle8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %a = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
+  %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %load = call <vscale x 1 x i8> @llvm.vp.load.nxv1i8(<vscale x 1 x i8>* %ptr, <vscale x 1 x i1> %b, i32 %evl)
+  ret <vscale x 1 x i8> %load
+}
+
 declare <vscale x 2 x i8> @llvm.vp.load.nxv2i8(<vscale x 2 x i8>*, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x i8> @vpload_nxv2i8(<vscale x 2 x i8>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
@@ -52,6 +66,20 @@ define <vscale x 8 x i8> @vpload_nxv8i8(<vscale x 8 x i8>* %ptr, <vscale x 8 x i
   ret <vscale x 8 x i8> %load
 }
 
+define <vscale x 8 x i8> @vpload_nxv8i8_allones_mask(<vscale x 8 x i8>* %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vpload_nxv8i8_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vle8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %a = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
+  %b = shufflevector <vscale x 8 x i1> %a, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %load = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8(<vscale x 8 x i8>* %ptr, <vscale x 8 x i1> %b, i32 %evl)
+  ret <vscale x 8 x i8> %load
+}
+
 declare <vscale x 1 x i16> @llvm.vp.load.nxv1i16(<vscale x 1 x i16>*, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x i16> @vpload_nxv1i16(<vscale x 1 x i16>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
@@ -76,6 +104,20 @@ define <vscale x 2 x i16> @vpload_nxv2i16(<vscale x 2 x i16>* %ptr, <vscale x 2
   ret <vscale x 2 x i16> %load
 }
 
+define <vscale x 2 x i16> @vpload_nxv2i16_allones_mask(<vscale x 2 x i16>* %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vpload_nxv2i16_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %a = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
+  %b = shufflevector <vscale x 2 x i1> %a, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %load = call <vscale x 2 x i16> @llvm.vp.load.nxv2i16(<vscale x 2 x i16>* %ptr, <vscale x 2 x i1> %b, i32 %evl)
+  ret <vscale x 2 x i16> %load
+}
+
 declare <vscale x 4 x i16> @llvm.vp.load.nxv4i16(<vscale x 4 x i16>*, <vscale x 4 x i1>, i32)
 
 define <vscale x 4 x i16> @vpload_nxv4i16(<vscale x 4 x i16>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
@@ -136,6 +178,20 @@ define <vscale x 4 x i32> @vpload_nxv4i32(<vscale x 4 x i32>* %ptr, <vscale x 4
   ret <vscale x 4 x i32> %load
 }
 
+define <vscale x 4 x i32> @vpload_nxv4i32_allones_mask(<vscale x 4 x i32>* %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vpload_nxv4i32_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %a = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
+  %b = shufflevector <vscale x 4 x i1> %a, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %load = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32(<vscale x 4 x i32>* %ptr, <vscale x 4 x i1> %b, i32 %evl)
+  ret <vscale x 4 x i32> %load
+}
+
 declare <vscale x 8 x i32> @llvm.vp.load.nxv8i32(<vscale x 8 x i32>*, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x i32> @vpload_nxv8i32(<vscale x 8 x i32>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
@@ -160,6 +216,20 @@ define <vscale x 1 x i64> @vpload_nxv1i64(<vscale x 1 x i64>* %ptr, <vscale x 1
   ret <vscale x 1 x i64> %load
 }
 
+define <vscale x 1 x i64> @vpload_nxv1i64_allones_mask(<vscale x 1 x i64>* %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vpload_nxv1i64_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8, mf8, ta, mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT:    vle64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %a = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
+  %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64(<vscale x 1 x i64>* %ptr, <vscale x 1 x i1> %b, i32 %evl)
+  ret <vscale x 1 x i64> %load
+}
+
 declare <vscale x 2 x i64> @llvm.vp.load.nxv2i64(<vscale x 2 x i64>*, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x i64> @vpload_nxv2i64(<vscale x 2 x i64>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
@@ -220,6 +290,20 @@ define <vscale x 2 x half> @vpload_nxv2f16(<vscale x 2 x half>* %ptr, <vscale x
   ret <vscale x 2 x half> %load
 }
 
+define <vscale x 2 x half> @vpload_nxv2f16_allones_mask(<vscale x 2 x half>* %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vpload_nxv2f16_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %a = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
+  %b = shufflevector <vscale x 2 x i1> %a, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %load = call <vscale x 2 x half> @llvm.vp.load.nxv2f16(<vscale x 2 x half>* %ptr, <vscale x 2 x i1> %b, i32 %evl)
+  ret <vscale x 2 x half> %load
+}
+
 declare <vscale x 4 x half> @llvm.vp.load.nxv4f16(<vscale x 4 x half>*, <vscale x 4 x i1>, i32)
 
 define <vscale x 4 x half> @vpload_nxv4f16(<vscale x 4 x half>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
@@ -292,6 +376,20 @@ define <vscale x 8 x float> @vpload_nxv8f32(<vscale x 8 x float>* %ptr, <vscale
   ret <vscale x 8 x float> %load
 }
 
+define <vscale x 8 x float> @vpload_nxv8f32_allones_mask(<vscale x 8 x float>* %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vpload_nxv8f32_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %a = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
+  %b = shufflevector <vscale x 8 x i1> %a, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  %load = call <vscale x 8 x float> @llvm.vp.load.nxv8f32(<vscale x 8 x float>* %ptr, <vscale x 8 x i1> %b, i32 %evl)
+  ret <vscale x 8 x float> %load
+}
+
 declare <vscale x 1 x double> @llvm.vp.load.nxv1f64(<vscale x 1 x double>*, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x double> @vpload_nxv1f64(<vscale x 1 x double>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
@@ -328,6 +426,20 @@ define <vscale x 4 x double> @vpload_nxv4f64(<vscale x 4 x double>* %ptr, <vscal
   ret <vscale x 4 x double> %load
 }
 
+define <vscale x 4 x double> @vpload_nxv4f64_allones_mask(<vscale x 4 x double>* %ptr, i32 zeroext %evl) {
+; CHECK-LABEL: vpload_nxv4f64_allones_mask:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT:    vle64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %a = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
+  %b = shufflevector <vscale x 4 x i1> %a, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  %load = call <vscale x 4 x double> @llvm.vp.load.nxv4f64(<vscale x 4 x double>* %ptr, <vscale x 4 x i1> %b, i32 %evl)
+  ret <vscale x 4 x double> %load
+}
+
 declare <vscale x 8 x double> @llvm.vp.load.nxv8f64(<vscale x 8 x double>*, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x double> @vpload_nxv8f64(<vscale x 8 x double>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {