@@ -8,10 +8,10 @@ define amdgpu_kernel void @i8_arg(i32 addrspace(1)* nocapture %out, i8 %in) noun
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i8 addrspace(4)* undef`, align 8, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1, align 8, addrspace 4)
 ; HSA-VI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s8)
 ; HSA-VI: G_STORE [[ZEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
@@ -27,10 +27,10 @@ define amdgpu_kernel void @i8_zext_arg(i32 addrspace(1)* nocapture %out, i8 zero
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i8 addrspace(4)* undef`, align 8, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1, align 8, addrspace 4)
 ; HSA-VI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s8)
 ; HSA-VI: G_STORE [[ZEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
@@ -46,10 +46,10 @@ define amdgpu_kernel void @i8_sext_arg(i32 addrspace(1)* nocapture %out, i8 sign
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i8 addrspace(4)* undef`, align 8, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1, align 8, addrspace 4)
 ; HSA-VI: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD1]](s8)
 ; HSA-VI: G_STORE [[SEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
@@ -65,10 +65,10 @@ define amdgpu_kernel void @i16_arg(i32 addrspace(1)* nocapture %out, i16 %in) no
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 2 from `i16 addrspace(4)* undef`, align 8, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 2, align 8, addrspace 4)
 ; HSA-VI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s16)
 ; HSA-VI: G_STORE [[ZEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
@@ -84,10 +84,10 @@ define amdgpu_kernel void @i16_zext_arg(i32 addrspace(1)* nocapture %out, i16 ze
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 2 from `i16 addrspace(4)* undef`, align 8, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 2, align 8, addrspace 4)
 ; HSA-VI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s16)
 ; HSA-VI: G_STORE [[ZEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
@@ -103,10 +103,10 @@ define amdgpu_kernel void @i16_sext_arg(i32 addrspace(1)* nocapture %out, i16 si
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 2 from `i16 addrspace(4)* undef`, align 8, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 2, align 8, addrspace 4)
 ; HSA-VI: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD1]](s16)
 ; HSA-VI: G_STORE [[SEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
@@ -122,10 +122,10 @@ define amdgpu_kernel void @i32_arg(i32 addrspace(1)* nocapture %out, i32 %in) no
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 4 from `i32 addrspace(4)* undef`, align 8, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 4, align 8, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 entry:
@@ -140,10 +140,10 @@ define amdgpu_kernel void @f32_arg(float addrspace(1)* nocapture %out, float %in
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `float addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 4 from `float addrspace(4)* undef`, align 8, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 4, align 8, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 entry:
@@ -158,10 +158,10 @@ define amdgpu_kernel void @v2i8_arg(<2 x i8> addrspace(1)* %out, <2 x i8> %in) {
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<2 x i8> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(<2 x s8>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 2 from `<2 x i8> addrspace(4)* undef`, align 8, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(<2 x s8>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 2, align 8, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](<2 x s8>), [[LOAD]](p1) :: (store 2 into %ir.out, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 entry:
@@ -176,10 +176,10 @@ define amdgpu_kernel void @v2i16_arg(<2 x i16> addrspace(1)* %out, <2 x i16> %in
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<2 x i16> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 4 from `<2 x i16> addrspace(4)* undef`, align 8, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 4, align 8, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](<2 x s16>), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 entry:
@@ -194,10 +194,10 @@ define amdgpu_kernel void @v2i32_arg(<2 x i32> addrspace(1)* nocapture %out, <2
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<2 x i32> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8 from `<2 x i32> addrspace(4)* undef`, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](<2 x s32>), [[LOAD]](p1) :: (store 8 into %ir.out, align 4, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 entry:
@@ -212,10 +212,10 @@ define amdgpu_kernel void @v2f32_arg(<2 x float> addrspace(1)* nocapture %out, <
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<2 x float> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8 from `<2 x float> addrspace(4)* undef`, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](<2 x s32>), [[LOAD]](p1) :: (store 8 into %ir.out, align 4, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 entry:
@@ -230,10 +230,10 @@ define amdgpu_kernel void @v3i8_arg(<3 x i8> addrspace(1)* nocapture %out, <3 x
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<3 x i8> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(<3 x s8>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 3 from `<3 x i8> addrspace(4)* undef`, align 8, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(<3 x s8>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 3, align 8, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](<3 x s8>), [[LOAD]](p1) :: (store 3 into %ir.out, align 4, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 entry:
@@ -248,10 +248,10 @@ define amdgpu_kernel void @v3i16_arg(<3 x i16> addrspace(1)* nocapture %out, <3
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<3 x i16> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(<3 x s16>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 6 from `<3 x i16> addrspace(4)* undef`, align 8, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(<3 x s16>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 6, align 8, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](<3 x s16>), [[LOAD]](p1) :: (store 6 into %ir.out, align 4, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 entry:
@@ -266,10 +266,10 @@ define amdgpu_kernel void @v3i32_arg(<3 x i32> addrspace(1)* nocapture %out, <3
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<3 x i32> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 12 from `<3 x i32> addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 12, align 16, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](<3 x s32>), [[LOAD]](p1) :: (store 12 into %ir.out, align 4, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 entry:
@@ -284,10 +284,10 @@ define amdgpu_kernel void @v3f32_arg(<3 x float> addrspace(1)* nocapture %out, <
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<3 x float> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 12 from `<3 x float> addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 12, align 16, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](<3 x s32>), [[LOAD]](p1) :: (store 12 into %ir.out, align 4, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 entry:
@@ -302,10 +302,10 @@ define amdgpu_kernel void @v4i8_arg(<4 x i8> addrspace(1)* %out, <4 x i8> %in) {
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<4 x i8> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(<4 x s8>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 4 from `<4 x i8> addrspace(4)* undef`, align 8, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(<4 x s8>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 4, align 8, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](<4 x s8>), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 entry:
@@ -320,10 +320,10 @@ define amdgpu_kernel void @v4i16_arg(<4 x i16> addrspace(1)* %out, <4 x i16> %in
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<4 x i16> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8 from `<4 x i16> addrspace(4)* undef`, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](<4 x s16>), [[LOAD]](p1) :: (store 8 into %ir.out, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 entry:
@@ -338,10 +338,10 @@ define amdgpu_kernel void @v4i32_arg(<4 x i32> addrspace(1)* nocapture %out, <4
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<4 x i32> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 16 from `<4 x i32> addrspace(4)* undef`, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 16, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](<4 x s32>), [[LOAD]](p1) :: (store 16 into %ir.out, align 4, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 entry:
@@ -356,10 +356,10 @@ define amdgpu_kernel void @v4f32_arg(<4 x float> addrspace(1)* nocapture %out, <
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<4 x float> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 16 from `<4 x float> addrspace(4)* undef`, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 16, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](<4 x s32>), [[LOAD]](p1) :: (store 16 into %ir.out, align 4, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 entry:
@@ -374,10 +374,10 @@ define amdgpu_kernel void @v8i8_arg(<8 x i8> addrspace(1)* %out, <8 x i8> %in) {
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<8 x i8> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(<8 x s8>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8 from `<8 x i8> addrspace(4)* undef`, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(<8 x s8>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](<8 x s8>), [[LOAD]](p1) :: (store 8 into %ir.out, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 entry:
@@ -392,10 +392,10 @@ define amdgpu_kernel void @v8i16_arg(<8 x i16> addrspace(1)* %out, <8 x i16> %in
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<8 x i16> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 16 from `<8 x i16> addrspace(4)* undef`, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 16, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](<8 x s16>), [[LOAD]](p1) :: (store 16 into %ir.out, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 entry:
@@ -410,10 +410,10 @@ define amdgpu_kernel void @v8i32_arg(<8 x i32> addrspace(1)* nocapture %out, <8
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<8 x i32> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(<8 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 32 from `<8 x i32> addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(<8 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 32, align 16, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](<8 x s32>), [[LOAD]](p1) :: (store 32 into %ir.out, align 4, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 entry:
@@ -428,10 +428,10 @@ define amdgpu_kernel void @v8f32_arg(<8 x float> addrspace(1)* nocapture %out, <
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<8 x float> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(<8 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 32 from `<8 x float> addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(<8 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 32, align 16, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](<8 x s32>), [[LOAD]](p1) :: (store 32 into %ir.out, align 4, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 entry:
@@ -446,10 +446,10 @@ define amdgpu_kernel void @v16i8_arg(<16 x i8> addrspace(1)* %out, <16 x i8> %in
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<16 x i8> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 16 from `<16 x i8> addrspace(4)* undef`, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 16, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](<16 x s8>), [[LOAD]](p1) :: (store 16 into %ir.out, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 entry:
@@ -464,10 +464,10 @@ define amdgpu_kernel void @v16i16_arg(<16 x i16> addrspace(1)* %out, <16 x i16>
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<16 x i16> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(<16 x s16>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 32 from `<16 x i16> addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(<16 x s16>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 32, align 16, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](<16 x s16>), [[LOAD]](p1) :: (store 32 into %ir.out, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 entry:
@@ -482,10 +482,10 @@ define amdgpu_kernel void @v16i32_arg(<16 x i32> addrspace(1)* nocapture %out, <
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<16 x i32> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 64 from `<16 x i32> addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 64, align 16, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](<16 x s32>), [[LOAD]](p1) :: (store 64 into %ir.out, align 4, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 entry:
@@ -500,10 +500,10 @@ define amdgpu_kernel void @v16f32_arg(<16 x float> addrspace(1)* nocapture %out,
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<16 x float> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 64 from `<16 x float> addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 64, align 16, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](<16 x s32>), [[LOAD]](p1) :: (store 64 into %ir.out, align 4, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 entry:
@@ -518,10 +518,10 @@ define amdgpu_kernel void @kernel_arg_i64(i64 addrspace(1)* %out, i64 %a) nounwi
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i64 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8 from `i64 addrspace(4)* undef`, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](s64), [[LOAD]](p1) :: (store 8 into %ir.out, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 store i64 %a, i64 addrspace(1)* %out, align 8
@@ -535,10 +535,10 @@ define amdgpu_kernel void @f64_kernel_arg(double addrspace(1)* %out, double %in
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `double addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8 from `double addrspace(4)* undef`, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](s64), [[LOAD]](p1) :: (store 8 into %ir.out, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 entry:
@@ -553,10 +553,10 @@ define amdgpu_kernel void @i1_arg(i1 addrspace(1)* %out, i1 %x) nounwind {
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i1 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(s1) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i1 addrspace(4)* undef`, align 8, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(s1) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1, align 8, addrspace 4)
 ; HSA-VI: G_STORE [[LOAD1]](s1), [[LOAD]](p1) :: (store 1 into %ir.out, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
 store i1 %x, i1 addrspace(1)* %out, align 1
@@ -570,10 +570,10 @@ define amdgpu_kernel void @i1_arg_zext_i32(i32 addrspace(1)* %out, i1 %x) nounwi
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(s1) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i1 addrspace(4)* undef`, align 8, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(s1) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1, align 8, addrspace 4)
 ; HSA-VI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s1)
 ; HSA-VI: G_STORE [[ZEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
@@ -589,10 +589,10 @@ define amdgpu_kernel void @i1_arg_zext_i64(i64 addrspace(1)* %out, i1 %x) nounwi
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i64 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(s1) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i1 addrspace(4)* undef`, align 8, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(s1) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1, align 8, addrspace 4)
 ; HSA-VI: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[LOAD1]](s1)
 ; HSA-VI: G_STORE [[ZEXT]](s64), [[LOAD]](p1) :: (store 8 into %ir.out, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
@@ -608,10 +608,10 @@ define amdgpu_kernel void @i1_arg_sext_i32(i32 addrspace(1)* %out, i1 %x) nounwi
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(s1) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i1 addrspace(4)* undef`, align 8, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(s1) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1, align 8, addrspace 4)
 ; HSA-VI: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD1]](s1)
 ; HSA-VI: G_STORE [[SEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
@@ -627,10 +627,10 @@ define amdgpu_kernel void @i1_arg_sext_i64(i64 addrspace(1)* %out, i1 %x) nounwi
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i64 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8, align 16, addrspace 4)
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(s1) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i1 addrspace(4)* undef`, align 8, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(s1) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1, align 8, addrspace 4)
 ; HSA-VI: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[LOAD1]](s1)
 ; HSA-VI: G_STORE [[SEXT]](s64), [[LOAD]](p1) :: (store 8 into %ir.out, addrspace 1)
 ; HSA-VI: S_ENDPGM 0
@@ -674,15 +674,15 @@ define amdgpu_kernel void @struct_argument_alignment({i32, i64} %arg0, i8, {i32,
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 16 from `{ i32, i64 } addrspace(4)* undef`, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 16, addrspace 4)
 ; HSA-VI: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[LOAD]](s128), 0
 ; HSA-VI: [[EXTRACT1:%[0-9]+]]:_(s64) = G_EXTRACT [[LOAD]](s128), 64
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i8 addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1, align 16, addrspace 4)
 ; HSA-VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
 ; HSA-VI: [[GEP2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
-; HSA-VI: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP2]](p4) :: (dereferenceable invariant load 16 from `{ i32, i64 } addrspace(4)* undef`, align 8, addrspace 4)
+; HSA-VI: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP2]](p4) :: (dereferenceable invariant load 16, align 8, addrspace 4)
 ; HSA-VI: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[LOAD2]](s128), 0
 ; HSA-VI: [[EXTRACT3:%[0-9]+]]:_(s64) = G_EXTRACT [[LOAD2]](s128), 64
 ; HSA-VI: S_ENDPGM 0
@@ -706,15 +706,15 @@ define amdgpu_kernel void @packed_struct_argument_alignment(<{i32, i64}> %arg0,
 ; HSA-VI: [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
 ; HSA-VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; HSA-VI: [[GEP:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
-; HSA-VI: [[LOAD:%[0-9]+]]:_(s96) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 12 from `<{ i32, i64 }> addrspace(4)* undef`, align 16, addrspace 4)
+; HSA-VI: [[LOAD:%[0-9]+]]:_(s96) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 12, align 16, addrspace 4)
 ; HSA-VI: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[LOAD]](s96), 0
 ; HSA-VI: [[EXTRACT1:%[0-9]+]]:_(s64) = G_EXTRACT [[LOAD]](s96), 32
 ; HSA-VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
 ; HSA-VI: [[GEP1:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; HSA-VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i8 addrspace(4)* undef`, align 4, addrspace 4)
+; HSA-VI: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1, align 4, addrspace 4)
 ; HSA-VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 13
 ; HSA-VI: [[GEP2:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
-; HSA-VI: [[LOAD2:%[0-9]+]]:_(s96) = G_LOAD [[GEP2]](p4) :: (dereferenceable invariant load 12 from `<{ i32, i64 }> addrspace(4)* undef`, align 1, addrspace 4)
+; HSA-VI: [[LOAD2:%[0-9]+]]:_(s96) = G_LOAD [[GEP2]](p4) :: (dereferenceable invariant load 12, align 1, addrspace 4)
 ; HSA-VI: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[LOAD2]](s96), 0
 ; HSA-VI: [[EXTRACT3:%[0-9]+]]:_(s64) = G_EXTRACT [[LOAD2]](s96), 32
 ; HSA-VI: S_ENDPGM 0