# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -march=amdgcn -mcpu=hawaii -mattr=+flat-for-global -run-pass=regbankselect %s -verify-machineinstrs -o - | FileCheck %s
--- |
  ; Reference IR module: supplies the %ir.* machine-memory-operand anchors
  ; (and the !amdgpu.noclobber metadata) used by the MIR bodies below.
  define amdgpu_kernel void @load_constant(i32 addrspace(4)* %ptr0) {
    ret void
  }

  define amdgpu_kernel void @load_constant_volatile(i32 addrspace(4)* %ptr0) {
    ret void
  }

  define amdgpu_kernel void @load_global_uniform_invariant(i32 addrspace(1)* %ptr1) {
    %tmp0 = load i32, i32 addrspace(1)* %ptr1
    ret void
  }

  define amdgpu_kernel void @load_global_uniform_noclobber(i32 addrspace(1)* %ptr1) {
    %tmp0 = load i32, i32 addrspace(1)* %ptr1, !amdgpu.noclobber !0
    ret void
  }

  define amdgpu_kernel void @load_global_uniform_variant(i32 addrspace(1)* %ptr1) {
    %tmp0 = load i32, i32 addrspace(1)* %ptr1
    ret void
  }

  define amdgpu_kernel void @load_global_uniform_volatile_invariant(i32 addrspace(1)* %ptr1) {
    %tmp0 = load i32, i32 addrspace(1)* %ptr1
    ret void
  }

  define amdgpu_kernel void @load_global_uniform_atomic_invariant(i32 addrspace(1)* %ptr1) {
    %tmp0 = load i32, i32 addrspace(1)* %ptr1
    ret void
  }

  define amdgpu_kernel void @load_global_non_uniform(i32 addrspace(1)* %ptr2) {
    %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0
    %tmp1 = getelementptr i32, i32 addrspace(1)* %ptr2, i32 %tmp0
    %tmp2 = load i32, i32 addrspace(1)* %tmp1
    ret void
  }

  define void @non_power_of_2() { ret void }

  define amdgpu_kernel void @load_constant_v4i16_from_8_align8(<3 x i16> addrspace(4)* %ptr0) {
    ret void
  }

  declare i32 @llvm.amdgcn.workitem.id.x() #0

  attributes #0 = { nounwind readnone }

  !0 = !{}

...
# Load from the constant address space (p4): regbankselect keeps both the
# pointer and the result on the scalar (sgpr) bank per the CHECK lines.
---
name: load_constant
legalized: true

body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_constant
    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
    ; CHECK: [[LOAD:%[0-9]+]]:sgpr(s32) = G_LOAD [[COPY]](p4) :: (load 4 from %ir.ptr0, addrspace 4)
    %0:_(p4) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (load 4 from %ir.ptr0)
...
# Volatile load from the constant address space: per the CHECK lines the
# result still stays on the sgpr bank for this addrspace-4 load.
---
name: load_constant_volatile
legalized: true

body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_constant_volatile
    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
    ; CHECK: [[LOAD:%[0-9]+]]:sgpr(s32) = G_LOAD [[COPY]](p4) :: (volatile load 4 from %ir.ptr0, addrspace 4)
    %0:_(p4) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (volatile load 4 from %ir.ptr0)
...
# Invariant uniform load from global (p1): CHECK lines show the load is kept
# on the sgpr bank (no vgpr copy is inserted).
---
name: load_global_uniform_invariant
legalized: true

body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_global_uniform_invariant
    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
    ; CHECK: [[LOAD:%[0-9]+]]:sgpr(s32) = G_LOAD [[COPY]](p1) :: (invariant load 4 from %ir.ptr1, addrspace 1)
    %0:_(p1) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (invariant load 4 from %ir.ptr1)
...
# Plain (non-invariant) uniform global load: CHECK lines show the pointer is
# copied to a vgpr and the load result lands on the vgpr bank.
# NOTE(review): despite the name, the !amdgpu.noclobber metadata lives only on
# the IR reference function, not on this G_LOAD's MMO — confirm intent.
---
name: load_global_uniform_noclobber
legalized: true

body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_global_uniform_noclobber
    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
    ; CHECK: [[LOAD:%[0-9]+]]:vgpr(s32) = G_LOAD [[COPY1]](p1) :: (load 4 from %ir.ptr1, addrspace 1)
    %0:_(p1) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (load 4 from %ir.ptr1)
...
# Non-invariant ("variant") uniform global load: CHECK lines show it is moved
# to the vgpr bank via a vgpr copy of the pointer.
---
name: load_global_uniform_variant
legalized: true

body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_global_uniform_variant
    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
    ; CHECK: [[LOAD:%[0-9]+]]:vgpr(s32) = G_LOAD [[COPY1]](p1) :: (load 4 from %ir.ptr1, addrspace 1)
    %0:_(p1) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (load 4 from %ir.ptr1)
...
# Volatile + invariant uniform global load: CHECK lines show volatility forces
# the vgpr bank even though the load is invariant.
---
name: load_global_uniform_volatile_invariant
legalized: true

body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_global_uniform_volatile_invariant
    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
    ; CHECK: [[LOAD:%[0-9]+]]:vgpr(s32) = G_LOAD [[COPY1]](p1) :: (volatile invariant load 4 from %ir.ptr1, addrspace 1)
    %0:_(p1) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (volatile invariant load 4 from %ir.ptr1)
...
# Atomic (acquire) + invariant uniform global load: CHECK lines show atomicity
# forces the vgpr bank even though the load is invariant.
---
name: load_global_uniform_atomic_invariant
legalized: true

body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_global_uniform_atomic_invariant
    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
    ; CHECK: [[LOAD:%[0-9]+]]:vgpr(s32) = G_LOAD [[COPY1]](p1) :: (invariant load acquire 4 from %ir.ptr1, addrspace 1)
    %0:_(p1) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (invariant load acquire 4 from %ir.ptr1)
...
# Divergent global load (address derived from workitem id in the IR reference
# function): CHECK lines show the vgpr bank is selected.
---
name: load_global_non_uniform
legalized: true

body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: load_global_non_uniform
    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
    ; CHECK: [[LOAD:%[0-9]+]]:vgpr(s32) = G_LOAD [[COPY1]](p1) :: (load 4 from %ir.tmp1, addrspace 1)
    %0:_(p1) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_LOAD %0 :: (load 4 from %ir.tmp1)
...
# Regression test: regbankselect must handle a non-power-of-2 scalar type
# (s448) without crashing; CHECK lines pin the sgpr assignment.
---
name: non_power_of_2
legalized: true

body: |
  bb.0:
    ; CHECK-LABEL: name: non_power_of_2
    ; CHECK: [[DEF:%[0-9]+]]:sgpr(s448) = G_IMPLICIT_DEF
    ; CHECK: [[EXTRACT:%[0-9]+]]:sgpr(s32) = G_EXTRACT [[DEF]](s448), 0
    ; CHECK: $sgpr0 = COPY [[EXTRACT]](s32)
    ; CHECK: SI_RETURN_TO_EPILOG $sgpr0
    %0:_(s448) = G_IMPLICIT_DEF
    %1:_(s32) = G_EXTRACT %0:_(s448), 0
    $sgpr0 = COPY %1:_(s32)
    SI_RETURN_TO_EPILOG $sgpr0
...
# 8-byte, 8-aligned <4 x s16> load from the constant address space: CHECK
# lines show it stays on the sgpr bank.
---
name: load_constant_v4i16_from_8_align8
legalized: true

body: |
  bb.0:
    ; CHECK-LABEL: name: load_constant_v4i16_from_8_align8
    ; CHECK: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
    ; CHECK: [[LOAD:%[0-9]+]]:sgpr(<4 x s16>) = G_LOAD [[COPY]](p4) :: (load 8 from %ir.ptr0, addrspace 4)
    %0:_(p4) = COPY $sgpr0_sgpr1
    %1:_(<4 x s16>) = G_LOAD %0 :: (load 8 from %ir.ptr0, align 8, addrspace 4)
...