; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck %s
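
; The s_buffer_load result is an SGPR, but it feeds a phi whose value is
; exported, so it must be copied into a VGPR (the v_mov checked below).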
; CHECK-LABEL: {{^}}phi1:
; CHECK: s_buffer_load_dword [[DST:s[0-9]]], {{s\[[0-9]+:[0-9]+\]}}, 0x0
; CHECK: v_mov_b32_e32 v{{[0-9]}}, [[DST]]
define amdgpu_ps void @phi1(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <8 x i32> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
  %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
  %tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp, !tbaa !0
  %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 0, i32 0)
  %tmp22 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 16, i32 0)
  %tmp23 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 32, i32 0)
  %tmp24 = fptosi float %tmp22 to i32
  %tmp25 = icmp ne i32 %tmp24, 0
  br i1 %tmp25, label %ENDIF, label %ELSE

ELSE: ; preds = %main_body
  %tmp26 = fsub float -0.000000e+00, %tmp21
  br label %ENDIF

ENDIF: ; preds = %ELSE, %main_body
  %temp.0 = phi float [ %tmp26, %ELSE ], [ %tmp21, %main_body ]
  %tmp27 = fadd float %temp.0, %tmp23
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp27, float %tmp27, float 0.000000e+00, float 1.000000e+00, i1 true, i1 true) #0
  ret void
}

; Make sure this program doesn't crash
; CHECK-LABEL: {{^}}phi2:
define amdgpu_ps void @phi2(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <8 x i32> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #1 {
main_body:
  %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
  %tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp, !tbaa !0
  %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 16, i32 0)
  %tmp22 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 32, i32 0)
  %tmp23 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 36, i32 0)
  %tmp24 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 40, i32 0)
  %tmp25 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 48, i32 0)
  %tmp26 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 52, i32 0)
  %tmp27 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 56, i32 0)
  %tmp28 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 64, i32 0)
  %tmp29 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 68, i32 0)
  %tmp30 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 72, i32 0)
  %tmp31 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 76, i32 0)
  %tmp32 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 80, i32 0)
  %tmp33 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 84, i32 0)
  %tmp34 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 88, i32 0)
  %tmp35 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 92, i32 0)
  %tmp36 = getelementptr <8 x i32>, <8 x i32> addrspace(4)* %arg2, i32 0
  %tmp37 = load <8 x i32>, <8 x i32> addrspace(4)* %tmp36, !tbaa !0
  %tmp38 = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg1, i32 0
  %tmp39 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp38, !tbaa !0
  %i.i = extractelement <2 x i32> %arg5, i32 0
  %j.i = extractelement <2 x i32> %arg5, i32 1
  %i.f.i = bitcast i32 %i.i to float
  %j.f.i = bitcast i32 %j.i to float
  %p1.i = call float @llvm.amdgcn.interp.p1(float %i.f.i, i32 0, i32 0, i32 %arg3) #1
  %p2.i = call float @llvm.amdgcn.interp.p2(float %p1.i, float %j.f.i, i32 0, i32 0, i32 %arg3) #1
  %i.i19 = extractelement <2 x i32> %arg5, i32 0
  %j.i20 = extractelement <2 x i32> %arg5, i32 1
  %i.f.i21 = bitcast i32 %i.i19 to float
  %j.f.i22 = bitcast i32 %j.i20 to float
  %p1.i23 = call float @llvm.amdgcn.interp.p1(float %i.f.i21, i32 1, i32 0, i32 %arg3) #1
  %p2.i24 = call float @llvm.amdgcn.interp.p2(float %p1.i23, float %j.f.i22, i32 1, i32 0, i32 %arg3) #1
  %i.i13 = extractelement <2 x i32> %arg5, i32 0
  %j.i14 = extractelement <2 x i32> %arg5, i32 1
  %i.f.i15 = bitcast i32 %i.i13 to float
  %j.f.i16 = bitcast i32 %j.i14 to float
  %p1.i17 = call float @llvm.amdgcn.interp.p1(float %i.f.i15, i32 0, i32 1, i32 %arg3) #1
  %p2.i18 = call float @llvm.amdgcn.interp.p2(float %p1.i17, float %j.f.i16, i32 0, i32 1, i32 %arg3) #1
  %i.i7 = extractelement <2 x i32> %arg5, i32 0
  %j.i8 = extractelement <2 x i32> %arg5, i32 1
  %i.f.i9 = bitcast i32 %i.i7 to float
  %j.f.i10 = bitcast i32 %j.i8 to float
  %p1.i11 = call float @llvm.amdgcn.interp.p1(float %i.f.i9, i32 1, i32 1, i32 %arg3) #1
  %p2.i12 = call float @llvm.amdgcn.interp.p2(float %p1.i11, float %j.f.i10, i32 1, i32 1, i32 %arg3) #1
  %i.i1 = extractelement <2 x i32> %arg5, i32 0
  %j.i2 = extractelement <2 x i32> %arg5, i32 1
  %i.f.i3 = bitcast i32 %i.i1 to float
  %j.f.i4 = bitcast i32 %j.i2 to float
  %p1.i5 = call float @llvm.amdgcn.interp.p1(float %i.f.i3, i32 2, i32 1, i32 %arg3) #1
  %p2.i6 = call float @llvm.amdgcn.interp.p2(float %p1.i5, float %j.f.i4, i32 2, i32 1, i32 %arg3) #1
  %tmp39.bc = bitcast <4 x i32> %tmp39 to <4 x i32>
  %tmp1 = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32 15, float %p2.i, float %p2.i24, <8 x i32> %tmp37, <4 x i32> %tmp39.bc, i1 0, i32 0, i32 0)
  %tmp50 = extractelement <4 x float> %tmp1, i32 2
  %tmp51 = call float @llvm.fabs.f32(float %tmp50)
  %tmp52 = fmul float %p2.i18, %p2.i18
  %tmp53 = fmul float %p2.i12, %p2.i12
  %tmp54 = fadd float %tmp53, %tmp52
  %tmp55 = fmul float %p2.i6, %p2.i6
  %tmp56 = fadd float %tmp54, %tmp55
  %tmp57 = call float @llvm.amdgcn.rsq.f32(float %tmp56)
  %tmp58 = fmul float %p2.i18, %tmp57
  %tmp59 = fmul float %p2.i12, %tmp57
  %tmp60 = fmul float %p2.i6, %tmp57
  %tmp61 = fmul float %tmp58, %tmp22
  %tmp62 = fmul float %tmp59, %tmp23
  %tmp63 = fadd float %tmp62, %tmp61
  %tmp64 = fmul float %tmp60, %tmp24
  %tmp65 = fadd float %tmp63, %tmp64
  %tmp66 = fsub float -0.000000e+00, %tmp25
  %tmp67 = fmul float %tmp65, %tmp51
  %tmp68 = fadd float %tmp67, %tmp66
  %tmp69 = fmul float %tmp26, %tmp68
  %tmp70 = fmul float %tmp27, %tmp68
  %tmp71 = call float @llvm.fabs.f32(float %tmp69)
  %tmp72 = fcmp olt float 0x3EE4F8B580000000, %tmp71
  %tmp73 = sext i1 %tmp72 to i32
  %tmp74 = bitcast i32 %tmp73 to float
  %tmp75 = bitcast float %tmp74 to i32
  %tmp76 = icmp ne i32 %tmp75, 0
  br i1 %tmp76, label %IF, label %ENDIF

IF: ; preds = %main_body
  %tmp77 = fsub float -0.000000e+00, %tmp69
  %tmp78 = call float @llvm.exp2.f32(float %tmp77)
  %tmp79 = fsub float -0.000000e+00, %tmp78
  %tmp80 = fadd float 1.000000e+00, %tmp79
  %tmp81 = fdiv float 1.000000e+00, %tmp69
  %tmp82 = fmul float %tmp80, %tmp81
  %tmp83 = fmul float %tmp31, %tmp82
  br label %ENDIF

ENDIF: ; preds = %IF, %main_body
  %temp4.0 = phi float [ %tmp83, %IF ], [ %tmp31, %main_body ]
  %tmp84 = call float @llvm.fabs.f32(float %tmp70)
  %tmp85 = fcmp olt float 0x3EE4F8B580000000, %tmp84
  %tmp86 = sext i1 %tmp85 to i32
  %tmp87 = bitcast i32 %tmp86 to float
  %tmp88 = bitcast float %tmp87 to i32
  %tmp89 = icmp ne i32 %tmp88, 0
  br i1 %tmp89, label %IF25, label %ENDIF24

IF25: ; preds = %ENDIF
  %tmp90 = fsub float -0.000000e+00, %tmp70
  %tmp91 = call float @llvm.exp2.f32(float %tmp90)
  %tmp92 = fsub float -0.000000e+00, %tmp91
  %tmp93 = fadd float 1.000000e+00, %tmp92
  %tmp94 = fdiv float 1.000000e+00, %tmp70
  %tmp95 = fmul float %tmp93, %tmp94
  %tmp96 = fmul float %tmp35, %tmp95
  br label %ENDIF24

ENDIF24: ; preds = %IF25, %ENDIF
  %temp8.0 = phi float [ %tmp96, %IF25 ], [ %tmp35, %ENDIF ]
  %tmp97 = fmul float %tmp28, %temp4.0
  %tmp98 = fmul float %tmp29, %temp4.0
  %tmp99 = fmul float %tmp30, %temp4.0
  %tmp100 = fmul float %tmp32, %temp8.0
  %tmp101 = fadd float %tmp100, %tmp97
  %tmp102 = fmul float %tmp33, %temp8.0
  %tmp103 = fadd float %tmp102, %tmp98
  %tmp104 = fmul float %tmp34, %temp8.0
  %tmp105 = fadd float %tmp104, %tmp99
  %tmp106 = call float @llvm.pow.f32(float %tmp51, float %tmp21)
  %tmp107 = fsub float -0.000000e+00, %tmp101
  %tmp108 = fmul float %tmp107, %tmp106
  %tmp109 = fsub float -0.000000e+00, %tmp103
  %tmp110 = fmul float %tmp109, %tmp106
  %tmp111 = fsub float -0.000000e+00, %tmp105
  %tmp112 = fmul float %tmp111, %tmp106
  %tmp113 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %tmp108, float %tmp110)
  %tmp115 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %tmp112, float 1.000000e+00)
  call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> %tmp113, <2 x half> %tmp115, i1 true, i1 true) #0
  ret void
}

; We just want to make sure the program doesn't crash
; CHECK-LABEL: {{^}}loop:
define amdgpu_ps void @loop(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <8 x i32> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
  %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
  %tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp, !tbaa !0
  %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 0, i32 0)
  %tmp22 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 4, i32 0)
  %tmp23 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 8, i32 0)
  %tmp24 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 12, i32 0)
  %tmp25 = fptosi float %tmp24 to i32
  %tmp26 = bitcast i32 %tmp25 to float
  %tmp27 = bitcast float %tmp26 to i32
  br label %LOOP

LOOP: ; preds = %ENDIF, %main_body
  %temp4.0 = phi float [ %tmp21, %main_body ], [ %temp5.0, %ENDIF ]
  %temp5.0 = phi float [ %tmp22, %main_body ], [ %temp6.0, %ENDIF ]
  %temp6.0 = phi float [ %tmp23, %main_body ], [ %temp4.0, %ENDIF ]
  %temp8.0 = phi float [ 0.000000e+00, %main_body ], [ %tmp36, %ENDIF ]
  %tmp28 = bitcast float %temp8.0 to i32
  %tmp29 = icmp sge i32 %tmp28, %tmp27
  %tmp30 = sext i1 %tmp29 to i32
  %tmp31 = bitcast i32 %tmp30 to float
  %tmp32 = bitcast float %tmp31 to i32
  %tmp33 = icmp ne i32 %tmp32, 0
  br i1 %tmp33, label %IF, label %ENDIF

IF: ; preds = %LOOP
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %temp4.0, float %temp5.0, float %temp6.0, float 1.000000e+00, i1 true, i1 true) #0
  ret void

ENDIF: ; preds = %LOOP
  %tmp34 = bitcast float %temp8.0 to i32
  %tmp35 = add i32 %tmp34, 1
  %tmp36 = bitcast i32 %tmp35 to float
  br label %LOOP
}

; This checks for a bug in the FixSGPRCopies pass where VReg96
; registers were being identified as an SGPR regclass which was causing
; an assertion failure.

; CHECK-LABEL: {{^}}sample_v3:
; CHECK: v_mov_b32_e32 v[[SAMPLE_LO:[0-9]+]], 11
; CHECK: v_mov_b32_e32 v[[SAMPLE_HI:[0-9]+]], 13
; CHECK: s_branch

; CHECK-DAG: v_mov_b32_e32 v[[SAMPLE_LO:[0-9]+]], 5
; CHECK-DAG: v_mov_b32_e32 v[[SAMPLE_HI:[0-9]+]], 7

; CHECK: BB{{[0-9]+_[0-9]+}}:
; CHECK: image_sample v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[SAMPLE_LO]]:[[SAMPLE_HI]]{{\]}}
; CHECK: exp
; CHECK: s_endpgm
define amdgpu_ps void @sample_v3([17 x <4 x i32>] addrspace(4)* inreg %arg, [32 x <4 x i32>] addrspace(4)* inreg %arg1, [16 x <8 x i32>] addrspace(4)* inreg %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 {
entry:
  %tmp = getelementptr [17 x <4 x i32>], [17 x <4 x i32>] addrspace(4)* %arg, i64 0, i32 0
  %tmp21 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp, !tbaa !0
  %tmp22 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp21, i32 16, i32 0)
  %tmp23 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(4)* %arg2, i64 0, i32 0
  %tmp24 = load <8 x i32>, <8 x i32> addrspace(4)* %tmp23, !tbaa !0
  %tmp25 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(4)* %arg1, i64 0, i32 0
  %tmp26 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp25, !tbaa !0
  %tmp27 = fcmp oeq float %tmp22, 0.000000e+00
  %tmp26.bc = bitcast <4 x i32> %tmp26 to <4 x i32>
  br i1 %tmp27, label %if, label %else

if: ; preds = %entry
  %tmp1 = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32 15, float 0x36D6000000000000, float 0x36DA000000000000, <8 x i32> %tmp24, <4 x i32> %tmp26.bc, i1 0, i32 0, i32 0)
  %val.if.0 = extractelement <4 x float> %tmp1, i32 0
  %val.if.1 = extractelement <4 x float> %tmp1, i32 1
  %val.if.2 = extractelement <4 x float> %tmp1, i32 2
  br label %endif

else: ; preds = %entry
  %tmp2 = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32 15, float 0x36C4000000000000, float 0x36CC000000000000, <8 x i32> %tmp24, <4 x i32> %tmp26.bc, i1 0, i32 0, i32 0)
  %val.else.0 = extractelement <4 x float> %tmp2, i32 0
  %val.else.1 = extractelement <4 x float> %tmp2, i32 1
  %val.else.2 = extractelement <4 x float> %tmp2, i32 2
  br label %endif

endif: ; preds = %else, %if
  %val.0 = phi float [ %val.if.0, %if ], [ %val.else.0, %else ]
  %val.1 = phi float [ %val.if.1, %if ], [ %val.else.1, %else ]
  %val.2 = phi float [ %val.if.2, %if ], [ %val.else.2, %else ]
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %val.0, float %val.1, float %val.2, float 0.000000e+00, i1 true, i1 true) #0
  ret void
}
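
; The float loaded from memory lives in a VGPR, so the increment feeding the
; phi is expected to be selected as a VALU add (the v_add checked below).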
; CHECK-LABEL: {{^}}copy1:
; CHECK: buffer_load_dword
; CHECK: v_add
; CHECK: s_endpgm
define amdgpu_kernel void @copy1(float addrspace(1)* %out, float addrspace(1)* %in0) {
entry:
  %tmp = load float, float addrspace(1)* %in0
  %tmp1 = fcmp oeq float %tmp, 0.000000e+00
  br i1 %tmp1, label %if0, label %endif

if0: ; preds = %entry
  %tmp2 = bitcast float %tmp to i32
  %tmp3 = fcmp olt float %tmp, 0.000000e+00
  br i1 %tmp3, label %if1, label %endif

if1: ; preds = %if0
  %tmp4 = add i32 %tmp2, 1
  br label %endif

endif: ; preds = %if1, %if0, %entry
  %tmp5 = phi i32 [ 0, %entry ], [ %tmp2, %if0 ], [ %tmp4, %if1 ]
  %tmp6 = bitcast i32 %tmp5 to float
  store float %tmp6, float addrspace(1)* %out
  ret void
}

; This test is just checking that we don't crash / assertion fail.
; CHECK-LABEL: {{^}}copy2:
; CHECK: s_endpgm
define amdgpu_ps void @copy2([17 x <4 x i32>] addrspace(4)* inreg %arg, [32 x <4 x i32>] addrspace(4)* inreg %arg1, [16 x <8 x i32>] addrspace(4)* inreg %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 {
entry:
  br label %LOOP68

LOOP68: ; preds = %ENDIF69, %entry
  %temp4.7 = phi float [ 0.000000e+00, %entry ], [ %v, %ENDIF69 ]
  %t = phi i32 [ 20, %entry ], [ %x, %ENDIF69 ]
  %g = icmp eq i32 0, %t
  %l = bitcast float %temp4.7 to i32
  br i1 %g, label %IF70, label %ENDIF69

IF70: ; preds = %LOOP68
  %q = icmp ne i32 %l, 13
  %temp.8 = select i1 %q, float 1.000000e+00, float 0.000000e+00
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %temp.8, float 0.000000e+00, float 0.000000e+00, float 1.000000e+00, i1 true, i1 true) #0
  ret void

ENDIF69: ; preds = %LOOP68
  %u = add i32 %l, %t
  %v = bitcast i32 %u to float
  %x = add i32 %t, -1
  br label %LOOP68
}

; This test checks that image_sample resource descriptors aren't loaded into
; vgprs. The verifier will fail if this happens.
; CHECK-LABEL: {{^}}sample_rsrc

; CHECK: s_cmp_eq_u32
; CHECK: s_cbranch_scc0 [[END:BB[0-9]+_[0-9]+]]

; CHECK: v_add_{{[iu]}}32_e32 v[[ADD:[0-9]+]], vcc, 1, v{{[0-9]+}}

; [[END]]:
; CHECK: image_sample v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+}}:[[ADD]]{{\]}}
; CHECK: s_endpgm
define amdgpu_ps void @sample_rsrc([6 x <4 x i32>] addrspace(4)* inreg %arg, [17 x <4 x i32>] addrspace(4)* inreg %arg1, [16 x <4 x i32>] addrspace(4)* inreg %arg2, [32 x <8 x i32>] addrspace(4)* inreg %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, i32 %arg19, float %arg20, float %arg21) #0 {
bb:
  %tmp = getelementptr [17 x <4 x i32>], [17 x <4 x i32>] addrspace(4)* %arg1, i32 0, i32 0
  %tmp22 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp, !tbaa !3
  %tmp23 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp22, i32 16, i32 0)
  %tmp25 = getelementptr [32 x <8 x i32>], [32 x <8 x i32>] addrspace(4)* %arg3, i32 0, i32 0
  %tmp26 = load <8 x i32>, <8 x i32> addrspace(4)* %tmp25, !tbaa !3
  %tmp27 = getelementptr [16 x <4 x i32>], [16 x <4 x i32>] addrspace(4)* %arg2, i32 0, i32 0
  %tmp28 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp27, !tbaa !3
  %i.i = extractelement <2 x i32> %arg7, i32 0
  %j.i = extractelement <2 x i32> %arg7, i32 1
  %i.f.i = bitcast i32 %i.i to float
  %j.f.i = bitcast i32 %j.i to float
  %p1.i = call float @llvm.amdgcn.interp.p1(float %i.f.i, i32 0, i32 0, i32 %arg5) #0
  %p2.i = call float @llvm.amdgcn.interp.p2(float %p1.i, float %j.f.i, i32 0, i32 0, i32 %arg5) #0
  %i.i1 = extractelement <2 x i32> %arg7, i32 0
  %j.i2 = extractelement <2 x i32> %arg7, i32 1
  %i.f.i3 = bitcast i32 %i.i1 to float
  %j.f.i4 = bitcast i32 %j.i2 to float
  %p1.i5 = call float @llvm.amdgcn.interp.p1(float %i.f.i3, i32 1, i32 0, i32 %arg5) #0
  %p2.i6 = call float @llvm.amdgcn.interp.p2(float %p1.i5, float %j.f.i4, i32 1, i32 0, i32 %arg5) #0
  %tmp31 = bitcast float %tmp23 to i32
  %tmp36 = icmp ne i32 %tmp31, 0
  br i1 %tmp36, label %bb38, label %bb80

bb38: ; preds = %bb
  %tmp56 = bitcast <8 x i32> %tmp26 to <8 x i32>
  %tmp2 = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32 15, float %p2.i, float %p2.i6, <8 x i32> %tmp56, <4 x i32> %tmp28, i1 0, i32 0, i32 0)
  br label %bb71

bb80: ; preds = %bb
  %tmp81 = bitcast float %p2.i to i32
  %tmp82 = bitcast float %p2.i6 to i32
  %tmp82.2 = add i32 %tmp82, 1
  %tmp83 = bitcast i32 %tmp81 to float
  %tmp84 = bitcast i32 %tmp82.2 to float
  %tmp85 = bitcast <8 x i32> %tmp26 to <8 x i32>
  %tmp3 = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32 15, float %tmp83, float %tmp84, <8 x i32> %tmp85, <4 x i32> %tmp28, i1 0, i32 0, i32 0)
  br label %bb71

bb71: ; preds = %bb80, %bb38
  %tmp72 = phi <4 x float> [ %tmp2, %bb38 ], [ %tmp3, %bb80 ]
  %tmp88 = extractelement <4 x float> %tmp72, i32 0
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp88, float %tmp88, float %tmp88, float %tmp88, i1 true, i1 true) #0
  ret void
}

; Check the resource descriptor is stored in an sgpr.
; CHECK-LABEL: {{^}}mimg_srsrc_sgpr:
; CHECK: image_sample v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}] dmask:0x1
define amdgpu_ps void @mimg_srsrc_sgpr([34 x <8 x i32>] addrspace(4)* inreg %arg) #0 {
bb:
  %tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
  %tmp7 = getelementptr [34 x <8 x i32>], [34 x <8 x i32>] addrspace(4)* %arg, i32 0, i32 %tid
  %tmp8 = load <8 x i32>, <8 x i32> addrspace(4)* %tmp7, align 32, !tbaa !0
  %tmp = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32 15, float 7.500000e-01, float 2.500000e-01, <8 x i32> %tmp8, <4 x i32> undef, i1 0, i32 0, i32 0)
  %tmp10 = extractelement <4 x float> %tmp, i32 0
  %tmp12 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float undef, float %tmp10)
  call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> %tmp12, <2 x half> undef, i1 true, i1 true) #0
  ret void
}

; Check the sampler is stored in an sgpr.
; CHECK-LABEL: {{^}}mimg_ssamp_sgpr:
; CHECK: image_sample v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}] dmask:0x1
define amdgpu_ps void @mimg_ssamp_sgpr([17 x <4 x i32>] addrspace(4)* inreg %arg) #0 {
bb:
  %tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
  %tmp7 = getelementptr [17 x <4 x i32>], [17 x <4 x i32>] addrspace(4)* %arg, i32 0, i32 %tid
  %tmp8 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp7, align 16, !tbaa !0
  %tmp = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32 15, float 7.500000e-01, float 2.500000e-01, <8 x i32> undef, <4 x i32> %tmp8, i1 0, i32 0, i32 0)
  %tmp10 = extractelement <4 x float> %tmp, i32 0
  %tmp12 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %tmp10, float undef)
  call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> %tmp12, <2 x half> undef, i1 true, i1 true) #0
  ret void
}

declare float @llvm.fabs.f32(float) #1
declare float @llvm.amdgcn.rsq.f32(float) #1
declare float @llvm.exp2.f32(float) #1
declare float @llvm.pow.f32(float, float) #1
declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #1
declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) #1
declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #1
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
declare void @llvm.amdgcn.exp.compr.v2f16(i32, i32, <2 x half>, <2 x half>, i1, i1) #0
declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) #1
declare <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #2
declare float @llvm.amdgcn.s.buffer.load.f32(<4 x i32>, i32, i32) #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind readonly }

!0 = !{!1, !1, i64 0, i32 1}
!1 = !{!"const", !2}
!2 = !{!"tbaa root"}
!3 = !{!1, !1, i64 0}