; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -verify-machineinstrs -o - %s | FileCheck %s

; FIXME: Test with SI when argument lowering is not broken for f16
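;
; The raw buffer load intrinsics under test take four operands
; (rsrc, voffset, soffset, cachepolicy), e.g.:
;   declare float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32>, i32, i32, i32 immarg)
; %rsrc is the 128-bit buffer descriptor, %voffset the per-lane byte offset,
; %soffset the scalar byte offset, and the final immediate the cache policy bits.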

; Natural mapping
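; The incoming register banks already match what BUFFER_LOAD_DWORD_OFFEN
; expects (rsrc in SGPRs, voffset in a VGPR, soffset in an SGPR), so
; selection needs no cross-bank copies and no waterfall loop.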
define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret float %val
}

; Copies for VGPR arguments
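; voffset arrives in an SGPR here (inreg), but BUFFER_LOAD_DWORD_OFFEN needs
; it in a VGPR, so a single SGPR->VGPR copy is inserted before the load.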
define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__sgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 inreg %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__sgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr7
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
  ; CHECK: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY6]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret float %val
}

; Waterfall for rsrc
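; The descriptor is divergent (VGPRs), but the MUBUF instruction needs a
; uniform SGPR rsrc. The selector emits a waterfall loop: read the first
; active lane's descriptor with V_READFIRSTLANE_B32, compare it against each
; lane's value, run the load for the matching lanes, then mask those lanes
; out of $exec and repeat until every lane has been serviced.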
define amdgpu_ps float @raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset(<4 x i32> %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
  ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
  ; CHECK: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
  ; CHECK: [[S_MOV_B64_term:%[0-9]+]]:sreg_64_xexec = S_MOV_B64_term $exec
  ; CHECK: bb.2:
  ; CHECK: successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]].sub0, implicit $exec
  ; CHECK: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]].sub1, implicit $exec
  ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
  ; CHECK: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY6]], implicit $exec
  ; CHECK: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub0, implicit $exec
  ; CHECK: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub1, implicit $exec
  ; CHECK: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1
  ; CHECK: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY7]], implicit $exec
  ; CHECK: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
  ; CHECK: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
  ; CHECK: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY4]], [[REG_SEQUENCE3]], [[COPY5]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_]], implicit-def $exec, implicit-def $scc, implicit $exec
  ; CHECK: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc
  ; CHECK: S_CBRANCH_EXECNZ %bb.2, implicit $exec
  ; CHECK: bb.3:
  ; CHECK: successors: %bb.4(0x80000000)
  ; CHECK: $exec = S_MOV_B64_term [[S_MOV_B64_term]]
  ; CHECK: bb.4:
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret float %val
}

; Waterfall for rsrc and soffset
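; As above, but soffset is divergent too: its V_READFIRSTLANE_B32 result is
; compared and ANDed into the same loop condition, so one loop uniformizes
; both operands.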
define amdgpu_ps float @raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__vgpr_soffset(<4 x i32> %rsrc, i32 %voffset, i32 %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__vgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
  ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
  ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
  ; CHECK: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
  ; CHECK: [[S_MOV_B64_term:%[0-9]+]]:sreg_64_xexec = S_MOV_B64_term $exec
  ; CHECK: bb.2:
  ; CHECK: successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]].sub0, implicit $exec
  ; CHECK: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]].sub1, implicit $exec
  ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
  ; CHECK: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY6]], implicit $exec
  ; CHECK: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub0, implicit $exec
  ; CHECK: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub1, implicit $exec
  ; CHECK: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1
  ; CHECK: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY7]], implicit $exec
  ; CHECK: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
  ; CHECK: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
  ; CHECK: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
  ; CHECK: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY5]], implicit $exec
  ; CHECK: [[S_AND_B64_1:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U32_e64_]], [[S_AND_B64_]], implicit-def $scc
  ; CHECK: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_1]], implicit-def $exec, implicit-def $scc, implicit $exec
  ; CHECK: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc
  ; CHECK: S_CBRANCH_EXECNZ %bb.2, implicit $exec
  ; CHECK: bb.3:
  ; CHECK: successors: %bb.4(0x80000000)
  ; CHECK: $exec = S_MOV_B64_term [[S_MOV_B64_term]]
  ; CHECK: bb.4:
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret float %val
}

; Natural mapping + glc
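; The final intrinsic operand is the cache policy bitfield exercised by this
; and the following variants: bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10+).
; Each set bit maps onto the corresponding glc/slc/dlc immediate operand of
; the selected MUBUF instruction.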
define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_glc(<4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_glc
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 1, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 1)
  ret float %val
}

; Natural mapping + slc
define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc(<4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 1, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 2)
  ret float %val
}

; Natural mapping + dlc
define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_dlc(<4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_dlc
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, 0, 1, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 4)
  ret float %val
}

; Natural mapping + slc + dlc
define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc_dlc(<4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc_dlc
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 1, 0, 1, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 6)
  ret float %val
}

; Natural mapping + glc + dlc
define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_glc_dlc(<4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_glc_dlc
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 1, 0, 0, 1, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 5)
  ret float %val
}

; Natural mapping + glc + slc + dlc
define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_glc_slc_dlc(<4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_glc_slc_dlc
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 1, 1, 0, 1, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 7)
  ret float %val
}

; Natural mapping
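; Results wider than 32 bits select the DWORDX2/X3/X4 forms below, and the
; returned vector is split back into one VGPR per component for the epilog.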
define amdgpu_ps <2 x float> @raw_buffer_load_v2f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_v2f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[BUFFER_LOAD_DWORDX2_OFFEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 8 from custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_OFFEN]].sub0
  ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_OFFEN]].sub1
  ; CHECK: $vgpr0 = COPY [[COPY6]]
  ; CHECK: $vgpr1 = COPY [[COPY7]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
  %val = call <2 x float> @llvm.amdgcn.raw.buffer.load.v2f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret <2 x float> %val
}

define amdgpu_ps <3 x float> @raw_buffer_load_v3f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_v3f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[BUFFER_LOAD_DWORDX3_OFFEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 12 from custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_OFFEN]].sub0
  ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_OFFEN]].sub1
  ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_OFFEN]].sub2
  ; CHECK: $vgpr0 = COPY [[COPY6]]
  ; CHECK: $vgpr1 = COPY [[COPY7]]
  ; CHECK: $vgpr2 = COPY [[COPY8]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %val = call <3 x float> @llvm.amdgcn.raw.buffer.load.v3f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret <3 x float> %val
}

define amdgpu_ps <4 x float> @raw_buffer_load_v4f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_v4f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[BUFFER_LOAD_DWORDX4_OFFEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_OFFEN]].sub0
  ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_OFFEN]].sub1
  ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_OFFEN]].sub2
  ; CHECK: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_OFFEN]].sub3
  ; CHECK: $vgpr0 = COPY [[COPY6]]
  ; CHECK: $vgpr1 = COPY [[COPY7]]
  ; CHECK: $vgpr2 = COPY [[COPY8]]
  ; CHECK: $vgpr3 = COPY [[COPY9]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  %val = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret <4 x float> %val
}

define amdgpu_ps half @raw_buffer_load_f16__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_f16__sgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[BUFFER_LOAD_USHORT_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 2 from custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = call half @llvm.amdgcn.raw.buffer.load.f16(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret half %val
}

define amdgpu_ps <2 x half> @raw_buffer_load_v2f16__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_v2f16__sgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = call <2 x half> @llvm.amdgcn.raw.buffer.load.v2f16(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret <2 x half> %val
}

; FIXME: Crashes
; define amdgpu_ps <3 x half> @raw_buffer_load_v3f16__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
;   %val = call <3 x half> @llvm.amdgcn.raw.buffer.load.v3f16(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
;   ret <3 x half> %val
; }

define amdgpu_ps <4 x half> @raw_buffer_load_v4f16__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_v4f16__sgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[BUFFER_LOAD_DWORDX2_OFFEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 8 from custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_OFFEN]].sub0
  ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_OFFEN]].sub1
  ; CHECK: $vgpr0 = COPY [[COPY6]]
  ; CHECK: $vgpr1 = COPY [[COPY7]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
  %val = call <4 x half> @llvm.amdgcn.raw.buffer.load.v4f16(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret <4 x half> %val
}

define amdgpu_ps float @raw_buffer_load_i8__sgpr_rsrc__vgpr_voffset__sgpr_soffset_zext(<4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_i8__sgpr_rsrc__vgpr_voffset__sgpr_soffset_zext
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[BUFFER_LOAD_UBYTE_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 1 from custom "TargetCustom7", addrspace 4)
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = call i8 @llvm.amdgcn.raw.buffer.load.i8(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  %zext = zext i8 %val to i32
  %cast = bitcast i32 %zext to float
  ret float %cast
}

define amdgpu_ps float @raw_buffer_load_i8__sgpr_rsrc__vgpr_voffset__sgpr_soffset_sext(<4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_i8__sgpr_rsrc__vgpr_voffset__sgpr_soffset_sext
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[BUFFER_LOAD_UBYTE_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 1 from custom "TargetCustom7", addrspace 4)
  ; CHECK: [[V_BFE_I32_:%[0-9]+]]:vgpr_32 = V_BFE_I32 [[BUFFER_LOAD_UBYTE_OFFEN]], 0, 8, implicit $exec
  ; CHECK: $vgpr0 = COPY [[V_BFE_I32_]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = call i8 @llvm.amdgcn.raw.buffer.load.i8(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  %sext = sext i8 %val to i32
  %cast = bitcast i32 %sext to float
  ret float %cast
}

; Waterfall for rsrc
define amdgpu_ps half @raw_buffer_load_f16__vgpr_rsrc__vgpr_voffset__sgpr_soffset(<4 x i32> %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_f16__vgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
  ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
  ; CHECK: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
  ; CHECK: [[S_MOV_B64_term:%[0-9]+]]:sreg_64_xexec = S_MOV_B64_term $exec
  ; CHECK: bb.2:
  ; CHECK: successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]].sub0, implicit $exec
  ; CHECK: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]].sub1, implicit $exec
  ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
  ; CHECK: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY6]], implicit $exec
  ; CHECK: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub0, implicit $exec
  ; CHECK: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub1, implicit $exec
  ; CHECK: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1
  ; CHECK: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY7]], implicit $exec
  ; CHECK: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
  ; CHECK: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
  ; CHECK: [[BUFFER_LOAD_USHORT_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_OFFEN [[COPY4]], [[REG_SEQUENCE3]], [[COPY5]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 2 from custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_]], implicit-def $exec, implicit-def $scc, implicit $exec
  ; CHECK: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc
  ; CHECK: S_CBRANCH_EXECNZ %bb.2, implicit $exec
  ; CHECK: bb.3:
  ; CHECK: successors: %bb.4(0x80000000)
  ; CHECK: $exec = S_MOV_B64_term [[S_MOV_B64_term]]
  ; CHECK: bb.4:
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = call half @llvm.amdgcn.raw.buffer.load.f16(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret half %val
}

; Waterfall for rsrc
define amdgpu_ps float @raw_buffer_load_i8__vgpr_rsrc__vgpr_voffset__sgpr_soffset(<4 x i32> %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_i8__vgpr_rsrc__vgpr_voffset__sgpr_soffset
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: successors: %bb.2(0x80000000)
  ; CHECK: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
  ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; CHECK: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; CHECK: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
  ; CHECK: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
  ; CHECK: [[S_MOV_B64_term:%[0-9]+]]:sreg_64_xexec = S_MOV_B64_term $exec
  ; CHECK: bb.2:
  ; CHECK: successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]].sub0, implicit $exec
  ; CHECK: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]].sub1, implicit $exec
  ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
  ; CHECK: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY6]], implicit $exec
  ; CHECK: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub0, implicit $exec
  ; CHECK: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub1, implicit $exec
  ; CHECK: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1
  ; CHECK: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY7]], implicit $exec
  ; CHECK: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
  ; CHECK: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
  ; CHECK: [[BUFFER_LOAD_UBYTE_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_OFFEN [[COPY4]], [[REG_SEQUENCE3]], [[COPY5]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 1 from custom "TargetCustom7", addrspace 4)
  ; CHECK: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_]], implicit-def $exec, implicit-def $scc, implicit $exec
  ; CHECK: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc
  ; CHECK: S_CBRANCH_EXECNZ %bb.2, implicit $exec
  ; CHECK: bb.3:
  ; CHECK: successors: %bb.4(0x80000000)
  ; CHECK: $exec = S_MOV_B64_term [[S_MOV_B64_term]]
  ; CHECK: bb.4:
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = call i8 @llvm.amdgcn.raw.buffer.load.i8(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  %zext = zext i8 %val to i32
  %cast = bitcast i32 %zext to float
  ret float %cast
}
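
; A constant voffset of 0 or 4095 fits the MUBUF 12-bit immediate offset, so
; the no-VGPR _OFFSET form is selected below; 4096 does not fit and is
; materialized into a VGPR for the _OFFEN form instead.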
define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset0(<4 x i32> inreg %rsrc, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset0
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], [[COPY4]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFSET]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 0, i32 %soffset, i32 0)
  ret float %val
}

define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset4095(<4 x i32> inreg %rsrc, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset4095
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[REG_SEQUENCE]], [[COPY4]], 4095, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7" + 4095, align 1, addrspace 4)
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFSET]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 4095, i32 %soffset, i32 0)
  ret float %val
}

define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset4096(<4 x i32> inreg %rsrc, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset4096
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
  ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
  ; CHECK: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY5]], [[REG_SEQUENCE]], [[COPY4]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7" + 4096, align 1, addrspace 4)
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 4096, i32 %soffset, i32 0)
  ret float %val
}

define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add16(<4 x i32> inreg %rsrc, i32 %voffset.base, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add16
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 16, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7" + 16, align 1, addrspace 4)
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %voffset = add i32 %voffset.base, 16
  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret float %val
}

define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095(<4 x i32> inreg %rsrc, i32 %voffset.base, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 4095, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7" + 4095, align 1, addrspace 4)
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %voffset = add i32 %voffset.base, 4095
  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret float %val
}

define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4096(<4 x i32> inreg %rsrc, i32 %voffset.base, i32 inreg %soffset) {
  ; CHECK-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4096
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
  ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
  ; CHECK: %10:vgpr_32, dead %15:sreg_64_xexec = V_ADD_CO_U32_e64 [[COPY4]], [[COPY6]], 0, implicit $exec
  ; CHECK: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN %10, [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7" + 4096, align 1, addrspace 4)
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %voffset = add i32 %voffset.base, 4096
  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret float %val
}

define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset4095(<4 x i32> inreg %rsrc, i32 %voffset) {
  ; CHECK-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset4095
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
  ; CHECK: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 4095, i32 0)
  ret float %val
}

define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset4096(<4 x i32> inreg %rsrc, i32 %voffset) {
  ; CHECK-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset4096
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
  ; CHECK: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 4096, i32 0)
  ret float %val
}

define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add16(<4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset.base) {
  ; CHECK-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add16
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16
  ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY5]], [[S_MOV_B32_]], implicit-def $scc
  ; CHECK: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_ADD_I32_]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %soffset = add i32 %soffset.base, 16
  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret float %val
}

define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add4095(<4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset.base) {
  ; CHECK-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add4095
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
  ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY5]], [[S_MOV_B32_]], implicit-def $scc
  ; CHECK: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_ADD_I32_]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %soffset = add i32 %soffset.base, 4095
  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret float %val
}

define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add4096(<4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset.base) {
  ; CHECK-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add4096
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
  ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
  ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
  ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
  ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
  ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
  ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
  ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
  ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY5]], [[S_MOV_B32_]], implicit-def $scc
  ; CHECK: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[S_ADD_I32_]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
  ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
  ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
  %soffset = add i32 %soffset.base, 4096
  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret float %val
}

; An add of the soffset is necessary, along with a waterfall loop for the
; rsrc. Make sure the add is done outside of the waterfall loop.
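; The S_ADD_I32 on the uniform soffset is loop-invariant, so hoisting it
; ahead of bb.2 avoids redoing the add on every waterfall iteration.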
|
|
|
|
define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add5000(<4 x i32> %rsrc, i32 %voffset, i32 inreg %soffset.base) {
|
|
|
|
; CHECK-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add5000
|
|
|
|
; CHECK: bb.1 (%ir-block.0):
|
|
|
|
; CHECK: successors: %bb.2(0x80000000)
|
|
|
|
; CHECK: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
|
|
|
|
; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
|
|
|
|
; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
|
|
|
|
; CHECK: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
|
|
|
|
; CHECK: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
|
|
|
|
; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
|
|
|
|
; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
|
|
|
|
; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
|
|
|
|
; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000
; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY5]], [[S_MOV_B32_]], implicit-def $scc
; CHECK: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
; CHECK: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
; CHECK: [[S_MOV_B64_term:%[0-9]+]]:sreg_64_xexec = S_MOV_B64_term $exec
; CHECK: bb.2:
; CHECK: successors: %bb.3(0x40000000), %bb.2(0x40000000)
; CHECK: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]].sub0, implicit $exec
; CHECK: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]].sub1, implicit $exec
; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
; CHECK: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY6]], implicit $exec
; CHECK: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub0, implicit $exec
; CHECK: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub1, implicit $exec
; CHECK: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1
; CHECK: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY7]], implicit $exec
; CHECK: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
; CHECK: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
; CHECK: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN [[COPY4]], [[REG_SEQUENCE3]], [[S_ADD_I32_]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
; CHECK: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_]], implicit-def $exec, implicit-def $scc, implicit $exec
; CHECK: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc
; CHECK: S_CBRANCH_EXECNZ %bb.2, implicit $exec
; CHECK: bb.3:
; CHECK: successors: %bb.4(0x80000000)
; CHECK: $exec = S_MOV_B64_term [[S_MOV_B64_term]]
; CHECK: bb.4:
; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
%soffset = add i32 %soffset.base, 5000
%val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret float %val
}
; An add to the voffset is necessary, with a waterfall loop for the VGPR rsrc. Make sure the add is done outside of the waterfall loop.
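; Here the 5000 is split: 4096 is added to the voffset with V_ADD_CO_U32
; ahead of the loop, and the remaining 904 is folded into the instruction's
; immediate offset (4096 + 904 = 5000).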
define amdgpu_ps float @raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add5000(<4 x i32> %rsrc, i32 %voffset.base, i32 inreg %soffset) {
; CHECK-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add5000
; CHECK: bb.1 (%ir-block.0):
; CHECK: successors: %bb.2(0x80000000)
; CHECK: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
; CHECK: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
; CHECK: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
; CHECK: %13:vgpr_32, dead %35:sreg_64_xexec = V_ADD_CO_U32_e64 [[COPY4]], [[COPY6]], 0, implicit $exec
; CHECK: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
; CHECK: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
; CHECK: [[S_MOV_B64_term:%[0-9]+]]:sreg_64_xexec = S_MOV_B64_term $exec
; CHECK: bb.2:
; CHECK: successors: %bb.3(0x40000000), %bb.2(0x40000000)
; CHECK: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub0, implicit $exec
; CHECK: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]].sub1, implicit $exec
; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
; CHECK: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY7]], implicit $exec
; CHECK: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub0, implicit $exec
; CHECK: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]].sub1, implicit $exec
; CHECK: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1
; CHECK: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY8]], implicit $exec
; CHECK: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
; CHECK: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
; CHECK: [[BUFFER_LOAD_DWORD_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN %13, [[REG_SEQUENCE3]], [[COPY5]], 904, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7" + 5000, align 1, addrspace 4)
; CHECK: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_]], implicit-def $exec, implicit-def $scc, implicit $exec
; CHECK: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc
; CHECK: S_CBRANCH_EXECNZ %bb.2, implicit $exec
; CHECK: bb.3:
; CHECK: successors: %bb.4(0x80000000)
; CHECK: $exec = S_MOV_B64_term [[S_MOV_B64_term]]
; CHECK: bb.4:
; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
%voffset = add i32 %voffset.base, 5000
%val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret float %val
}
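; Declarations for the raw buffer load intrinsic variants.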
declare float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32>, i32, i32, i32 immarg)
declare <2 x float> @llvm.amdgcn.raw.buffer.load.v2f32(<4 x i32>, i32, i32, i32 immarg)
declare <3 x float> @llvm.amdgcn.raw.buffer.load.v3f32(<4 x i32>, i32, i32, i32 immarg)
declare <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32>, i32, i32, i32 immarg)
declare half @llvm.amdgcn.raw.buffer.load.f16(<4 x i32>, i32, i32, i32 immarg)
declare <2 x half> @llvm.amdgcn.raw.buffer.load.v2f16(<4 x i32>, i32, i32, i32 immarg)
declare <3 x half> @llvm.amdgcn.raw.buffer.load.v3f16(<4 x i32>, i32, i32, i32 immarg)
declare <4 x half> @llvm.amdgcn.raw.buffer.load.v4f16(<4 x i32>, i32, i32, i32 immarg)
declare i8 @llvm.amdgcn.raw.buffer.load.i8(<4 x i32>, i32, i32, i32 immarg)