AMDGPU: Remove unnecessary IR from MIR tests

llvm-svn: 307311
Matt Arsenault 2017-07-06 20:56:57 +00:00
parent c58b7c5973
commit 60b91e0ba2
7 changed files with 113 additions and 358 deletions
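Every file below follows the same pattern: these MIR tests carried a stub LLVM IR module only so that each MIR function had an IR symbol to attach to, and nothing in the MIR actually depends on those IR values. With the stub module gone, basic-block labels drop their IR block names (`bb.0 (%ir-block.0):` becomes `bb.0:`) and machine memory operands that named IR values (for example `:: (volatile store 4 into %ir.out)`) are dropped from the instructions that carried them. A minimal before/after sketch of the pattern, using an illustrative function name not taken from the commit:

# Before: a stub IR function exists only to name the MIR function,
# and the entry block is tied to it via (%ir-block.0).
--- |
  define amdgpu_kernel void @example() {
    ret void
  }
...
---
name: example
body: |
  bb.0 (%ir-block.0):
    S_ENDPGM
...

# After: the MIR stands alone; llc can materialize a dummy IR function
# for it, so the explicit IR block and the IR-derived names go away.
---
name: example
body: |
  bb.0:
    S_ENDPGM
...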


@@ -1,36 +1,4 @@
# RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-fold-operands %s -o - | FileCheck -check-prefix=GCN %s
--- |
define amdgpu_ps void @v_max_self_clamp_not_set_f32() #0 {
ret void
}
define amdgpu_ps void @v_clamp_omod_already_set_f32() #0 {
ret void
}
define amdgpu_ps void @v_omod_mul_omod_already_set_f32() #0 {
ret void
}
define amdgpu_ps void @v_omod_mul_clamp_already_set_f32() #0 {
ret void
}
define amdgpu_ps void @v_omod_add_omod_already_set_f32() #0 {
ret void
}
define amdgpu_ps void @v_omod_add_clamp_already_set_f32() #0 {
ret void
}
define amdgpu_ps void @v_max_reg_imm_f32() #0 {
ret void
}
attributes #0 = { nounwind "no-signed-zeros-fp-math"="false" }
...
---
# GCN-LABEL: name: v_max_self_clamp_not_set_f32
# GCN: %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
@@ -70,7 +38,7 @@ liveins:
- { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
- { reg: '%vgpr0', virtual-reg: '%3' }
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
@@ -132,7 +100,7 @@ liveins:
- { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
- { reg: '%vgpr0', virtual-reg: '%3' }
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
@@ -195,7 +163,7 @@ liveins:
- { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
- { reg: '%vgpr0', virtual-reg: '%3' }
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
@@ -260,7 +228,7 @@ liveins:
- { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
- { reg: '%vgpr0', virtual-reg: '%3' }
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
@@ -337,7 +305,7 @@ liveins:
- { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
- { reg: '%vgpr0', virtual-reg: '%3' }
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
@@ -402,7 +370,7 @@ liveins:
- { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
- { reg: '%vgpr0', virtual-reg: '%3' }
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
@@ -435,7 +403,7 @@ registers:
- { id: 0, class: vgpr_32 }
- { id: 1, class: vgpr_32 }
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %vgpr0
%0 = COPY %vgpr0


@@ -1,84 +1,5 @@
# RUN: llc -mtriple=amdgcn--amdhsa -mcpu=hawaii -verify-machineinstrs -run-pass si-fold-operands,dead-mi-elimination -o - %s | FileCheck -check-prefix=GCN %s
--- |
define amdgpu_kernel void @s_fold_and_imm_regimm_32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%and = and i32 %a, 1234567
store volatile i32 %and, i32 addrspace(1)* %out
ret void
}
define amdgpu_kernel void @v_fold_and_imm_regimm_32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%idxprom = sext i32 %tid to i64
%gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i64 %idxprom
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i64 %idxprom
%a = load i32, i32 addrspace(1)* %gep.a
%and = and i32 %a, 1234567
store i32 %and, i32 addrspace(1)* %gep.out
ret void
}
define amdgpu_kernel void @s_fold_shl_imm_regimm_32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%shl = shl i32 %a, 12
store volatile i32 %shl, i32 addrspace(1)* %out
ret void
}
define amdgpu_kernel void @v_fold_shl_imm_regimm_32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%idxprom = sext i32 %tid to i64
%gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i64 %idxprom
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i64 %idxprom
%a = load i32, i32 addrspace(1)* %gep.a
%shl = shl i32 %a, 12
store i32 %shl, i32 addrspace(1)* %gep.out
ret void
}
define amdgpu_kernel void @s_fold_ashr_imm_regimm_32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%ashr = ashr i32 %a, 12
store volatile i32 %ashr, i32 addrspace(1)* %out
ret void
}
define amdgpu_kernel void @v_fold_ashr_imm_regimm_32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%idxprom = sext i32 %tid to i64
%gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i64 %idxprom
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i64 %idxprom
%a = load i32, i32 addrspace(1)* %gep.a
%ashr = ashr i32 %a, 12
store i32 %ashr, i32 addrspace(1)* %gep.out
ret void
}
define amdgpu_kernel void @s_fold_lshr_imm_regimm_32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%lshr = lshr i32 %a, 12
store volatile i32 %lshr, i32 addrspace(1)* %out
ret void
}
define amdgpu_kernel void @v_fold_lshr_imm_regimm_32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%idxprom = sext i32 %tid to i64
%gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i64 %idxprom
%gep.out = getelementptr i32, i32 addrspace(1)* %out, i64 %idxprom
%a = load i32, i32 addrspace(1)* %gep.a
%lshr = lshr i32 %a, 12
store i32 %lshr, i32 addrspace(1)* %gep.out
ret void
}
define amdgpu_kernel void @undefined_vreg_operand() {
unreachable
}
declare i32 @llvm.amdgcn.workitem.id.x() #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
...
---
# GCN-LABEL: name: s_fold_and_imm_regimm_32{{$}}
# GCN: %10 = V_MOV_B32_e32 1543, implicit %exec
@@ -119,11 +40,11 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1
%0 = COPY %sgpr0_sgpr1
%1 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%1 = S_LOAD_DWORDX2_IMM %0, 36, 0
%2 = COPY %1.sub1
%3 = COPY %1.sub0
%4 = S_MOV_B32 61440
@@ -133,7 +54,7 @@ body: |
%8 = S_MOV_B32 9999
%9 = S_AND_B32 killed %7, killed %8, implicit-def dead %scc
%10 = COPY %9
BUFFER_STORE_DWORD_OFFSET killed %10, killed %6, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into %ir.out)
BUFFER_STORE_DWORD_OFFSET killed %10, killed %6, 0, 0, 0, 0, 0, implicit %exec
S_ENDPGM
...
@@ -204,12 +125,12 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
%4 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%4 = S_LOAD_DWORDX2_IMM %0, 36, 0
%31 = V_ASHRREV_I32_e64 31, %3, implicit %exec
%32 = REG_SEQUENCE %3, 1, %31, 2
%33 = V_LSHLREV_B64 2, killed %32, implicit %exec
@@ -223,19 +144,19 @@ body: |
%34 = V_MOV_B32_e32 63, implicit %exec
%27 = V_AND_B32_e64 %26, %24, implicit %exec
FLAT_STORE_DWORD %37, %27, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %37, %27, 0, 0, 0, implicit %exec, implicit %flat_scr
%28 = V_AND_B32_e64 %24, %26, implicit %exec
FLAT_STORE_DWORD %37, %28, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %37, %28, 0, 0, 0, implicit %exec, implicit %flat_scr
%29 = V_AND_B32_e32 %26, %24, implicit %exec
FLAT_STORE_DWORD %37, %29, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %37, %29, 0, 0, 0, implicit %exec, implicit %flat_scr
%30 = V_AND_B32_e64 %26, %26, implicit %exec
FLAT_STORE_DWORD %37, %30, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %37, %30, 0, 0, 0, implicit %exec, implicit %flat_scr
%31 = V_AND_B32_e64 %34, %34, implicit %exec
FLAT_STORE_DWORD %37, %31, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %37, %31, 0, 0, 0, implicit %exec, implicit %flat_scr
S_ENDPGM
@@ -285,11 +206,11 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1
%0 = COPY %sgpr0_sgpr1
%4 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%4 = S_LOAD_DWORDX2_IMM %0, 36, 0
%5 = S_MOV_B32 1
%6 = COPY %4.sub1
%7 = COPY %4.sub0
@@ -298,7 +219,7 @@ body: |
%10 = REG_SEQUENCE killed %7, 1, killed %6, 2, killed %9, 3, killed %8, 4
%12 = S_LSHL_B32 killed %5, 12, implicit-def dead %scc
%13 = COPY %12
BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into %ir.out)
BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit %exec
S_ENDPGM
...
@@ -390,7 +311,7 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%2 = COPY %vgpr0
@@ -411,34 +332,34 @@ body: |
%27 = S_MOV_B32 -4
%11 = V_LSHLREV_B32_e64 12, %10, implicit %exec
FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr
%12 = V_LSHLREV_B32_e64 %7, 12, implicit %exec
FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr
%13 = V_LSHL_B32_e64 %7, 12, implicit %exec
FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr
%14 = V_LSHL_B32_e64 12, %7, implicit %exec
FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr
%15 = V_LSHL_B32_e64 12, %24, implicit %exec
FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr
%22 = V_LSHL_B32_e64 %6, 12, implicit %exec
FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr
%23 = V_LSHL_B32_e64 %6, 32, implicit %exec
FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr
%25 = V_LSHL_B32_e32 %6, %6, implicit %exec
FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr
%26 = V_LSHLREV_B32_e32 11, %24, implicit %exec
FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr
%28 = V_LSHL_B32_e32 %27, %6, implicit %exec
FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr
S_ENDPGM
@@ -485,11 +406,11 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1
%0 = COPY %sgpr0_sgpr1
%4 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%4 = S_LOAD_DWORDX2_IMM %0, 36, 0
%5 = S_MOV_B32 999123
%6 = COPY %4.sub1
%7 = COPY %4.sub0
@@ -498,7 +419,7 @@ body: |
%10 = REG_SEQUENCE killed %7, 1, killed %6, 2, killed %9, 3, killed %8, 4
%12 = S_ASHR_I32 killed %5, 12, implicit-def dead %scc
%13 = COPY %12
BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into %ir.out)
BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit %exec
S_ENDPGM
...
@@ -593,12 +514,12 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%2 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
%3 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%3 = S_LOAD_DWORDX2_IMM %0, 36, 0
%15 = V_ASHRREV_I32_e64 31, %2, implicit %exec
%16 = REG_SEQUENCE %2, 1, %15, 2
%17 = V_LSHLREV_B64 2, killed %16, implicit %exec
@@ -619,34 +540,34 @@ body: |
%35 = V_MOV_B32_e32 2, implicit %exec
%11 = V_ASHRREV_I32_e64 8, %10, implicit %exec
FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr
%12 = V_ASHRREV_I32_e64 %8, %10, implicit %exec
FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr
%13 = V_ASHR_I32_e64 %7, 3, implicit %exec
FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr
%14 = V_ASHR_I32_e64 7, %32, implicit %exec
FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr
%15 = V_ASHR_I32_e64 %27, %24, implicit %exec
FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr
%22 = V_ASHR_I32_e64 %6, 4, implicit %exec
FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr
%23 = V_ASHR_I32_e64 %6, %33, implicit %exec
FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr
%25 = V_ASHR_I32_e32 %34, %34, implicit %exec
FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr
%26 = V_ASHRREV_I32_e32 11, %10, implicit %exec
FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr
%28 = V_ASHR_I32_e32 %27, %35, implicit %exec
FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr
S_ENDPGM
@@ -693,11 +614,11 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1
%0 = COPY %sgpr0_sgpr1
%4 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%4 = S_LOAD_DWORDX2_IMM %0, 36, 0
%5 = S_MOV_B32 -999123
%6 = COPY %4.sub1
%7 = COPY %4.sub0
@@ -706,7 +627,7 @@ body: |
%10 = REG_SEQUENCE killed %7, 1, killed %6, 2, killed %9, 3, killed %8, 4
%12 = S_LSHR_B32 killed %5, 12, implicit-def dead %scc
%13 = COPY %12
BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into %ir.out)
BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit %exec
S_ENDPGM
...
@@ -802,12 +723,12 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%2 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
%3 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%3 = S_LOAD_DWORDX2_IMM %0, 36, 0
%15 = V_ASHRREV_I32_e64 31, %2, implicit %exec
%16 = REG_SEQUENCE %2, 1, %15, 2
%17 = V_LSHLREV_B64 2, killed %16, implicit %exec
@@ -828,34 +749,34 @@ body: |
%35 = V_MOV_B32_e32 2, implicit %exec
%11 = V_LSHRREV_B32_e64 8, %10, implicit %exec
FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr
%12 = V_LSHRREV_B32_e64 %8, %10, implicit %exec
FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr
%13 = V_LSHR_B32_e64 %7, 3, implicit %exec
FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr
%14 = V_LSHR_B32_e64 7, %32, implicit %exec
FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr
%15 = V_LSHR_B32_e64 %27, %24, implicit %exec
FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr
%22 = V_LSHR_B32_e64 %6, 4, implicit %exec
FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr
%23 = V_LSHR_B32_e64 %6, %33, implicit %exec
FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr
%25 = V_LSHR_B32_e32 %34, %34, implicit %exec
FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr
%26 = V_LSHRREV_B32_e32 11, %10, implicit %exec
FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr
%28 = V_LSHR_B32_e32 %27, %35, implicit %exec
FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store 4 into %ir.gep.out)
FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr
S_ENDPGM


@@ -1,14 +1,4 @@
# RUN: llc -march=amdgcn -run-pass detect-dead-lanes -o - %s | FileCheck %s
--- |
define amdgpu_kernel void @test0() { ret void }
define amdgpu_kernel void @test1() { ret void }
define amdgpu_kernel void @test2() { ret void }
define amdgpu_kernel void @test3() { ret void }
define amdgpu_kernel void @test4() { ret void }
define amdgpu_kernel void @test5() { ret void }
define amdgpu_kernel void @loop0() { ret void }
define amdgpu_kernel void @loop1() { ret void }
define amdgpu_kernel void @loop2() { ret void }
...
---
# Combined use/def transfer check, the basics.


@@ -1,26 +1,5 @@
# RUN: llc -march=amdgcn -run-pass peephole-opt -verify-machineinstrs %s -o - | FileCheck -check-prefix=GCN %s
--- |
define amdgpu_kernel void @no_fold_imm_madak_mac_clamp_f32() #0 {
ret void
}
define amdgpu_kernel void @no_fold_imm_madak_mac_omod_f32() #0 {
ret void
}
define amdgpu_kernel void @no_fold_imm_madak_mad_clamp_f32() #0 {
ret void
}
define amdgpu_kernel void @no_fold_imm_madak_mad_omod_f32() #0 {
ret void
}
attributes #0 = { nounwind }
...
---
# GCN-LABEL: name: no_fold_imm_madak_mac_clamp_f32
# GCN: %23 = V_MOV_B32_e32 1090519040, implicit %exec
# GCN-NEXT: %24 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit %exec
@@ -62,14 +41,14 @@ liveins:
- { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
- { reg: '%vgpr0', virtual-reg: '%3' }
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
%4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%6 = S_LOAD_DWORDX2_IMM %0, 13, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%4 = S_LOAD_DWORDX2_IMM %0, 9, 0
%5 = S_LOAD_DWORDX2_IMM %0, 11, 0
%6 = S_LOAD_DWORDX2_IMM %0, 13, 0
%27 = V_ASHRREV_I32_e32 31, %3, implicit %exec
%28 = REG_SEQUENCE %3, 1, %27, 2
%11 = S_MOV_B32 61440
@@ -133,14 +112,14 @@ liveins:
- { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
- { reg: '%vgpr0', virtual-reg: '%3' }
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
%4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%6 = S_LOAD_DWORDX2_IMM %0, 13, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%4 = S_LOAD_DWORDX2_IMM %0, 9, 0
%5 = S_LOAD_DWORDX2_IMM %0, 11, 0
%6 = S_LOAD_DWORDX2_IMM %0, 13, 0
%27 = V_ASHRREV_I32_e32 31, %3, implicit %exec
%28 = REG_SEQUENCE %3, 1, %27, 2
%11 = S_MOV_B32 61440
@@ -204,14 +183,14 @@ liveins:
- { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
- { reg: '%vgpr0', virtual-reg: '%3' }
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
%4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%6 = S_LOAD_DWORDX2_IMM %0, 13, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%4 = S_LOAD_DWORDX2_IMM %0, 9, 0
%5 = S_LOAD_DWORDX2_IMM %0, 11, 0
%6 = S_LOAD_DWORDX2_IMM %0, 13, 0
%27 = V_ASHRREV_I32_e32 31, %3, implicit %exec
%28 = REG_SEQUENCE %3, 1, %27, 2
%11 = S_MOV_B32 61440
@@ -275,14 +254,14 @@ liveins:
- { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
- { reg: '%vgpr0', virtual-reg: '%3' }
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
%4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%6 = S_LOAD_DWORDX2_IMM %0, 13, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%4 = S_LOAD_DWORDX2_IMM %0, 9, 0
%5 = S_LOAD_DWORDX2_IMM %0, 11, 0
%6 = S_LOAD_DWORDX2_IMM %0, 13, 0
%27 = V_ASHRREV_I32_e32 31, %3, implicit %exec
%28 = REG_SEQUENCE %3, 1, %27, 2
%11 = S_MOV_B32 61440


@@ -1,10 +1,4 @@
# RUN: llc -mtriple=amdgcn--amdhsa -mcpu=hawaii -verify-machineinstrs -run-pass si-fold-operands -o - %s | FileCheck -check-prefix=GCN %s
--- |
define amdgpu_kernel void @mov_in_use_list_2x() {
unreachable
}
...
---


@@ -6,92 +6,7 @@
# that the post-RA run does manage to shrink it, but right now the
# resume crashes
--- |
define amdgpu_kernel void @shrink_add_vop3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.ptr = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
%b.ptr = getelementptr i32, i32 addrspace(1)* %a.ptr, i32 1
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 %tid.ext
%a = load volatile i32, i32 addrspace(1)* %a.ptr
%b = load volatile i32, i32 addrspace(1)* %b.ptr
%result = add i32 %a, %b
store volatile i32 %result, i32 addrspace(1)* %out.gep
ret void
}
define amdgpu_kernel void @shrink_sub_vop3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.ptr = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
%b.ptr = getelementptr i32, i32 addrspace(1)* %a.ptr, i32 1
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 %tid.ext
%a = load volatile i32, i32 addrspace(1)* %a.ptr
%b = load volatile i32, i32 addrspace(1)* %b.ptr
%result = sub i32 %a, %b
store volatile i32 %result, i32 addrspace(1)* %out.gep
ret void
}
define amdgpu_kernel void @shrink_subrev_vop3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.ptr = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
%b.ptr = getelementptr i32, i32 addrspace(1)* %a.ptr, i32 1
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 %tid.ext
%a = load volatile i32, i32 addrspace(1)* %a.ptr
%b = load volatile i32, i32 addrspace(1)* %b.ptr
%result = sub i32 %a, %b
store volatile i32 %result, i32 addrspace(1)* %out.gep
ret void
}
define amdgpu_kernel void @check_addc_src2_vop3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.ptr = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
%b.ptr = getelementptr i32, i32 addrspace(1)* %a.ptr, i32 1
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 %tid.ext
%a = load volatile i32, i32 addrspace(1)* %a.ptr
%b = load volatile i32, i32 addrspace(1)* %b.ptr
%result = add i32 %a, %b
store volatile i32 %result, i32 addrspace(1)* %out.gep
ret void
}
define amdgpu_kernel void @shrink_addc_vop3(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.ptr = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
%b.ptr = getelementptr i32, i32 addrspace(1)* %a.ptr, i32 1
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 %tid.ext
%a = load volatile i32, i32 addrspace(1)* %a.ptr
%b = load volatile i32, i32 addrspace(1)* %b.ptr
%result = add i32 %a, %b
store volatile i32 %result, i32 addrspace(1)* %out.gep
ret void
}
define amdgpu_kernel void @shrink_addc_undef_vcc(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.ptr = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %tid.ext
%b.ptr = getelementptr i32, i32 addrspace(1)* %a.ptr, i32 1
%out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 %tid.ext
%a = load volatile i32, i32 addrspace(1)* %a.ptr
%b = load volatile i32, i32 addrspace(1)* %b.ptr
%result = add i32 %a, %b
store volatile i32 %result, i32 addrspace(1)* %out.gep
ret void
}
declare i32 @llvm.amdgcn.workitem.id.x() #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
...
---
# GCN-LABEL: name: shrink_add_vop3{{$}}
# GCN: %29, %9 = V_ADD_I32_e64 %19, %17, implicit %exec
# GCN: %24 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit %exec
@@ -151,13 +66,13 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
%4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%4 = S_LOAD_DWORDX2_IMM %0, 9, 0
%5 = S_LOAD_DWORDX2_IMM %0, 11, 0
%26 = V_ASHRREV_I32_e32 31, %3, implicit %exec
%27 = REG_SEQUENCE %3, 1, %26, 2
%10 = S_MOV_B32 61440
@@ -166,11 +81,11 @@ body: |
%13 = REG_SEQUENCE killed %5, 17, %12, 18
%28 = V_LSHL_B64 killed %27, 2, implicit %exec
%16 = REG_SEQUENCE killed %4, 17, %12, 18
%17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.a.ptr)
%19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.b.ptr)
%17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec
%19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec
%29, %9 = V_ADD_I32_e64 %19, %17, implicit %exec
%24 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit %exec
BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into %ir.out.gep)
BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec
S_ENDPGM
...
@@ -235,13 +150,13 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
%4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%4 = S_LOAD_DWORDX2_IMM %0, 9, 0
%5 = S_LOAD_DWORDX2_IMM %0, 11, 0
%26 = V_ASHRREV_I32_e32 31, %3, implicit %exec
%27 = REG_SEQUENCE %3, 1, %26, 2
%10 = S_MOV_B32 61440
@@ -250,11 +165,11 @@ body: |
%13 = REG_SEQUENCE killed %5, 17, %12, 18
%28 = V_LSHL_B64 killed %27, 2, implicit %exec
%16 = REG_SEQUENCE killed %4, 17, %12, 18
%17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.a.ptr)
%19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.b.ptr)
%17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec
%19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec
%29, %9 = V_SUB_I32_e64 %19, %17, implicit %exec
%24 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit %exec
BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into %ir.out.gep)
BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec
S_ENDPGM
...
@@ -319,13 +234,13 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
%4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%4 = S_LOAD_DWORDX2_IMM %0, 9, 0
%5 = S_LOAD_DWORDX2_IMM %0, 11, 0
%26 = V_ASHRREV_I32_e32 31, %3, implicit %exec
%27 = REG_SEQUENCE %3, 1, %26, 2
%10 = S_MOV_B32 61440
@@ -334,11 +249,11 @@ body: |
%13 = REG_SEQUENCE killed %5, 17, %12, 18
%28 = V_LSHL_B64 killed %27, 2, implicit %exec
%16 = REG_SEQUENCE killed %4, 17, %12, 18
%17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.a.ptr)
%19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.b.ptr)
%17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec
%19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec
%29, %9 = V_SUBREV_I32_e64 %19, %17, implicit %exec
%24 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit %exec
BUFFER_STORE_DWORD_ADDR64 %29, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into %ir.out.gep)
BUFFER_STORE_DWORD_ADDR64 %29, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec
S_ENDPGM
...
@@ -402,13 +317,13 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
%4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%4 = S_LOAD_DWORDX2_IMM %0, 9, 0
%5 = S_LOAD_DWORDX2_IMM %0, 11, 0
%26 = V_ASHRREV_I32_e32 31, %3, implicit %exec
%27 = REG_SEQUENCE %3, 1, %26, 2
%10 = S_MOV_B32 61440
@@ -417,12 +332,12 @@ body: |
%13 = REG_SEQUENCE killed %5, 17, %12, 18
%28 = V_LSHL_B64 killed %27, 2, implicit %exec
%16 = REG_SEQUENCE killed %4, 17, %12, 18
%17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.a.ptr)
%19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.b.ptr)
%17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec
%19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec
%9 = S_MOV_B64 0
%29, %vcc = V_ADDC_U32_e64 %19, %17, %9, implicit %exec
%24 = V_CNDMASK_B32_e64 0, 1, killed %vcc, implicit %exec
BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into %ir.out.gep)
BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec
S_ENDPGM
...
@@ -487,13 +402,13 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
%4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%4 = S_LOAD_DWORDX2_IMM %0, 9, 0
%5 = S_LOAD_DWORDX2_IMM %0, 11, 0
%26 = V_ASHRREV_I32_e32 31, %3, implicit %exec
%27 = REG_SEQUENCE %3, 1, %26, 2
%10 = S_MOV_B32 61440
@@ -502,12 +417,12 @@ body: |
%13 = REG_SEQUENCE killed %5, 17, %12, 18
%28 = V_LSHL_B64 killed %27, 2, implicit %exec
%16 = REG_SEQUENCE killed %4, 17, %12, 18
%17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.a.ptr)
%19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.b.ptr)
%17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec
%19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec
%vcc = S_MOV_B64 0
%29, %vcc = V_ADDC_U32_e64 %19, %17, %vcc, implicit %exec
%24 = V_CNDMASK_B32_e64 0, 1, killed %vcc, implicit %exec
BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into %ir.out.gep)
BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec
S_ENDPGM
...
@@ -572,13 +487,13 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
body: |
bb.0 (%ir-block.0):
bb.0:
liveins: %sgpr0_sgpr1, %vgpr0
%3 = COPY %vgpr0
%0 = COPY %sgpr0_sgpr1
%4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
%4 = S_LOAD_DWORDX2_IMM %0, 9, 0
%5 = S_LOAD_DWORDX2_IMM %0, 11, 0
%26 = V_ASHRREV_I32_e32 31, %3, implicit %exec
%27 = REG_SEQUENCE %3, 1, %26, 2
%10 = S_MOV_B32 61440
@@ -587,11 +502,11 @@ body: |
%13 = REG_SEQUENCE killed %5, 17, %12, 18
%28 = V_LSHL_B64 killed %27, 2, implicit %exec
%16 = REG_SEQUENCE killed %4, 17, %12, 18
%17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.a.ptr)
%19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec :: (volatile load 4 from %ir.b.ptr)
%17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec
%19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec
%29, %vcc = V_ADDC_U32_e64 %19, %17, undef %vcc, implicit %exec
%24 = V_CNDMASK_B32_e64 0, 1, killed %vcc, implicit %exec
BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into %ir.out.gep)
BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec
S_ENDPGM
...


@@ -1,18 +1,6 @@
# RUN: llc -mtriple=amdgcn -mcpu=fiji -verify-machineinstrs -run-pass si-insert-waits -o - %s | FileCheck %s
--- |
define float @waitcnt-permute(i32 %x, i32 %y) {
entry:
%0 = call i32 @llvm.amdgcn.ds.bpermute(i32 %x, i32 %y)
%1 = bitcast i32 %0 to float
%2 = fadd float 1.000000e+00, %1
ret float %2
}
declare i32 @llvm.amdgcn.ds.bpermute(i32, i32)
...
---
# CHECK-LABEL: name: waitcnt-permute{{$}}
# CHECK: DS_BPERMUTE_B32
# CHECK-NEXT: S_WAITCNT 127