# llvm-project/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-vector-args-gfx7.mir
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=hawaii -run-pass=legalizer -verify-machineinstrs -o - %s | FileCheck -check-prefix=GFX7 %s
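# GFX7 (hawaii) has no 16-bit instructions, so 16-bit vector arguments are
# passed as one s32 per element. The legalizer either keeps the operation on
# <2 x s16> (G_AND), widens it to s32 scalars (G_ADD, G_SHL), or lowers it
# through f32 with fpext/fptrunc (G_FMA, G_FMAXNUM).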
--- |
define <2 x i16> @and_v2i16(<2 x i16> %a, <2 x i16> %b) #0 {
%and = and <2 x i16> %a, %b
ret <2 x i16> %and
}
define <3 x i16> @add_v3i16(<3 x i16> %a, <3 x i16> %b) #0 {
%add = add <3 x i16> %a, %b
ret <3 x i16> %add
}
define <3 x i16> @shl_v3i16(<3 x i16> %a, <3 x i16> %b) #0 {
%shl = shl <3 x i16> %a, %b
ret <3 x i16> %shl
}
define <4 x half> @fma_v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c) {
%fma = call <4 x half> @llvm.fma.v4f16(<4 x half> %a, <4 x half> %b, <4 x half> %c)
ret <4 x half> %fma
}
define amdgpu_ps <5 x half> @maxnum_v5i16(<5 x half> %a, <5 x half> %b) {
%fma = call <5 x half> @llvm.maxnum.v5f16(<5 x half> %a, <5 x half> %b)
ret <5 x half> %fma
}
declare <4 x half> @llvm.fma.v4f16(<4 x half>, <4 x half>, <4 x half>)
declare <5 x half> @llvm.maxnum.v5f16(<5 x half>, <5 x half>)
...
---
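# <2 x i16> arguments arrive as one s32 per element and are rebuilt with
# G_BUILD_VECTOR + G_TRUNC; the <2 x s16> G_AND itself stays legal, and the
# result is returned by bitcasting to s32 and shifting out the high half.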
name: and_v2i16
body: |
bb.1:
liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
; GFX7-LABEL: name: and_v2i16
; GFX7: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX7-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX7-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32)
; GFX7-NEXT: [[TRUNC:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[BUILD_VECTOR]](<2 x s32>)
; GFX7-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX7-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX7-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32)
; GFX7-NEXT: [[TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[BUILD_VECTOR1]](<2 x s32>)
; GFX7-NEXT: [[AND:%[0-9]+]]:_(<2 x s16>) = G_AND [[TRUNC]], [[TRUNC1]]
; GFX7-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[AND]](<2 x s16>)
; GFX7-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX7-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
; GFX7-NEXT: $vgpr0 = COPY [[BITCAST]](s32)
; GFX7-NEXT: $vgpr1 = COPY [[LSHR]](s32)
; GFX7-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
%3:_(s32) = COPY $vgpr0
%4:_(s32) = COPY $vgpr1
%5:_(<2 x s32>) = G_BUILD_VECTOR %3(s32), %4(s32)
%0:_(<2 x s16>) = G_TRUNC %5(<2 x s32>)
%6:_(s32) = COPY $vgpr2
%7:_(s32) = COPY $vgpr3
%8:_(<2 x s32>) = G_BUILD_VECTOR %6(s32), %7(s32)
%1:_(<2 x s16>) = G_TRUNC %8(<2 x s32>)
%9:_(<2 x s16>) = G_AND %0, %1
%13:_(s16), %14:_(s16) = G_UNMERGE_VALUES %9(<2 x s16>)
%11:_(s32) = G_ANYEXT %13(s16)
%12:_(s32) = G_ANYEXT %14(s16)
$vgpr0 = COPY %11(s32)
$vgpr1 = COPY %12(s32)
SI_RETURN implicit $vgpr0, implicit $vgpr1
...
---
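# <3 x i16> addition is scalarized and widened: the three s32 argument pieces
# are added directly with s32 G_ADDs, with no trunc/extend chain left over.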
name: add_v3i16
body: |
bb.1:
liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
; GFX7-LABEL: name: add_v3i16
; GFX7: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX7-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX7-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX7-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX7-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX7-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX7-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY3]]
; GFX7-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY4]]
; GFX7-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY5]]
; GFX7-NEXT: $vgpr0 = COPY [[ADD]](s32)
; GFX7-NEXT: $vgpr1 = COPY [[ADD1]](s32)
; GFX7-NEXT: $vgpr2 = COPY [[ADD2]](s32)
; GFX7-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
%3:_(s32) = COPY $vgpr0
%4:_(s32) = COPY $vgpr1
%5:_(s32) = COPY $vgpr2
%6:_(<3 x s32>) = G_BUILD_VECTOR %3(s32), %4(s32), %5(s32)
%0:_(<3 x s16>) = G_TRUNC %6(<3 x s32>)
%7:_(s32) = COPY $vgpr3
%8:_(s32) = COPY $vgpr4
%9:_(s32) = COPY $vgpr5
%10:_(<3 x s32>) = G_BUILD_VECTOR %7(s32), %8(s32), %9(s32)
%1:_(<3 x s16>) = G_TRUNC %10(<3 x s32>)
%11:_(<3 x s16>) = G_ADD %0, %1
%16:_(s16), %17:_(s16), %18:_(s16) = G_UNMERGE_VALUES %11(<3 x s16>)
%13:_(s32) = G_ANYEXT %16(s16)
%14:_(s32) = G_ANYEXT %17(s16)
%15:_(s32) = G_ANYEXT %18(s16)
$vgpr0 = COPY %13(s32)
$vgpr1 = COPY %14(s32)
$vgpr2 = COPY %15(s32)
SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
...
---
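# Same widening as add_v3i16, but each shift amount is first masked with
# 0xffff (zero-extended from 16 bits) before the s32 G_SHL.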
name: shl_v3i16
body: |
bb.1:
liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
; GFX7-LABEL: name: shl_v3i16
; GFX7: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX7-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX7-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX7-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX7-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX7-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX7-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; GFX7-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]]
; GFX7-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[AND]](s32)
; GFX7-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]]
; GFX7-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[AND1]](s32)
; GFX7-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C]]
; GFX7-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[AND2]](s32)
; GFX7-NEXT: $vgpr0 = COPY [[SHL]](s32)
; GFX7-NEXT: $vgpr1 = COPY [[SHL1]](s32)
; GFX7-NEXT: $vgpr2 = COPY [[SHL2]](s32)
; GFX7-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
%3:_(s32) = COPY $vgpr0
%4:_(s32) = COPY $vgpr1
%5:_(s32) = COPY $vgpr2
%6:_(<3 x s32>) = G_BUILD_VECTOR %3(s32), %4(s32), %5(s32)
%0:_(<3 x s16>) = G_TRUNC %6(<3 x s32>)
%7:_(s32) = COPY $vgpr3
%8:_(s32) = COPY $vgpr4
%9:_(s32) = COPY $vgpr5
%10:_(<3 x s32>) = G_BUILD_VECTOR %7(s32), %8(s32), %9(s32)
%1:_(<3 x s16>) = G_TRUNC %10(<3 x s32>)
%11:_(<3 x s16>) = G_SHL %0, %1(<3 x s16>)
%16:_(s16), %17:_(s16), %18:_(s16) = G_UNMERGE_VALUES %11(<3 x s16>)
%13:_(s32) = G_ANYEXT %16(s16)
%14:_(s32) = G_ANYEXT %17(s16)
%15:_(s32) = G_ANYEXT %18(s16)
$vgpr0 = COPY %13(s32)
$vgpr1 = COPY %14(s32)
$vgpr2 = COPY %15(s32)
SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
...
---
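# GFX7 has no f16 FMA, so each <4 x half> element is extended to f32 with
# G_FPEXT, combined with a 32-bit G_FMA, and truncated back with G_FPTRUNC.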
name: fma_v4f16
body: |
bb.1:
liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11
; GFX7-LABEL: name: fma_v4f16
; GFX7: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX7-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX7-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX7-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX7-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX7-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX7-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX7-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX7-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
; GFX7-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
; GFX7-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
; GFX7-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
; GFX7-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX7-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX7-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; GFX7-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
; GFX7-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY4]](s32)
; GFX7-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s32)
; GFX7-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY6]](s32)
; GFX7-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[COPY7]](s32)
; GFX7-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s32)
; GFX7-NEXT: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
; GFX7-NEXT: [[TRUNC10:%[0-9]+]]:_(s16) = G_TRUNC [[COPY10]](s32)
; GFX7-NEXT: [[TRUNC11:%[0-9]+]]:_(s16) = G_TRUNC [[COPY11]](s32)
; GFX7-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
; GFX7-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC4]](s16)
; GFX7-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC8]](s16)
; GFX7-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FPEXT]], [[FPEXT1]], [[FPEXT2]]
; GFX7-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA]](s32)
; GFX7-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
; GFX7-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC5]](s16)
; GFX7-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC9]](s16)
; GFX7-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FPEXT3]], [[FPEXT4]], [[FPEXT5]]
; GFX7-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA1]](s32)
; GFX7-NEXT: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
; GFX7-NEXT: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC6]](s16)
; GFX7-NEXT: [[FPEXT8:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC10]](s16)
; GFX7-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FPEXT6]], [[FPEXT7]], [[FPEXT8]]
; GFX7-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA2]](s32)
; GFX7-NEXT: [[FPEXT9:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC3]](s16)
; GFX7-NEXT: [[FPEXT10:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC7]](s16)
; GFX7-NEXT: [[FPEXT11:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC11]](s16)
; GFX7-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FPEXT9]], [[FPEXT10]], [[FPEXT11]]
; GFX7-NEXT: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA3]](s32)
; GFX7-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC]](s16)
; GFX7-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC1]](s16)
; GFX7-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC2]](s16)
; GFX7-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC3]](s16)
; GFX7-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX7-NEXT: $vgpr1 = COPY [[ANYEXT1]](s32)
; GFX7-NEXT: $vgpr2 = COPY [[ANYEXT2]](s32)
; GFX7-NEXT: $vgpr3 = COPY [[ANYEXT3]](s32)
; GFX7-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
%4:_(s32) = COPY $vgpr0
%5:_(s32) = COPY $vgpr1
%6:_(s32) = COPY $vgpr2
%7:_(s32) = COPY $vgpr3
%8:_(<4 x s32>) = G_BUILD_VECTOR %4(s32), %5(s32), %6(s32), %7(s32)
%0:_(<4 x s16>) = G_TRUNC %8(<4 x s32>)
%9:_(s32) = COPY $vgpr4
%10:_(s32) = COPY $vgpr5
%11:_(s32) = COPY $vgpr6
%12:_(s32) = COPY $vgpr7
%13:_(<4 x s32>) = G_BUILD_VECTOR %9(s32), %10(s32), %11(s32), %12(s32)
%1:_(<4 x s16>) = G_TRUNC %13(<4 x s32>)
%14:_(s32) = COPY $vgpr8
%15:_(s32) = COPY $vgpr9
%16:_(s32) = COPY $vgpr10
%17:_(s32) = COPY $vgpr11
%18:_(<4 x s32>) = G_BUILD_VECTOR %14(s32), %15(s32), %16(s32), %17(s32)
%2:_(<4 x s16>) = G_TRUNC %18(<4 x s32>)
%19:_(<4 x s16>) = G_FMA %0, %1, %2
%25:_(s16), %26:_(s16), %27:_(s16), %28:_(s16) = G_UNMERGE_VALUES %19(<4 x s16>)
%21:_(s32) = G_ANYEXT %25(s16)
%22:_(s32) = G_ANYEXT %26(s16)
%23:_(s32) = G_ANYEXT %27(s16)
%24:_(s32) = G_ANYEXT %28(s16)
$vgpr0 = COPY %21(s32)
$vgpr1 = COPY %22(s32)
$vgpr2 = COPY %23(s32)
$vgpr3 = COPY %24(s32)
SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
...
---
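# amdgpu_ps function returning <5 x half> in $vgpr0-$vgpr4: each element pair
# is extended to f32, combined with G_FMAXNUM_IEEE, truncated back to s16,
# and any-extended for the return via SI_RETURN_TO_EPILOG.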
name: maxnum_v5i16
body: |
bb.1:
liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9
; GFX7-LABEL: name: maxnum_v5i16
; GFX7: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX7-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX7-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX7-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX7-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
; GFX7-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX7-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
; GFX7-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
; GFX7-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
; GFX7-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
; GFX7-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX7-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX7-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; GFX7-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
; GFX7-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY4]](s32)
; GFX7-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s32)
; GFX7-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[COPY6]](s32)
; GFX7-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[COPY7]](s32)
; GFX7-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s32)
; GFX7-NEXT: [[TRUNC9:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
; GFX7-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
; GFX7-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC5]](s16)
; GFX7-NEXT: [[FMAXNUM_IEEE:%[0-9]+]]:_(s32) = G_FMAXNUM_IEEE [[FPEXT]], [[FPEXT1]]
; GFX7-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMAXNUM_IEEE]](s32)
; GFX7-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
; GFX7-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC6]](s16)
; GFX7-NEXT: [[FMAXNUM_IEEE1:%[0-9]+]]:_(s32) = G_FMAXNUM_IEEE [[FPEXT2]], [[FPEXT3]]
; GFX7-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMAXNUM_IEEE1]](s32)
; GFX7-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
; GFX7-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC7]](s16)
; GFX7-NEXT: [[FMAXNUM_IEEE2:%[0-9]+]]:_(s32) = G_FMAXNUM_IEEE [[FPEXT4]], [[FPEXT5]]
; GFX7-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMAXNUM_IEEE2]](s32)
; GFX7-NEXT: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC3]](s16)
; GFX7-NEXT: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC8]](s16)
; GFX7-NEXT: [[FMAXNUM_IEEE3:%[0-9]+]]:_(s32) = G_FMAXNUM_IEEE [[FPEXT6]], [[FPEXT7]]
; GFX7-NEXT: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMAXNUM_IEEE3]](s32)
; GFX7-NEXT: [[FPEXT8:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC4]](s16)
; GFX7-NEXT: [[FPEXT9:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC9]](s16)
; GFX7-NEXT: [[FMAXNUM_IEEE4:%[0-9]+]]:_(s32) = G_FMAXNUM_IEEE [[FPEXT8]], [[FPEXT9]]
; GFX7-NEXT: [[FPTRUNC4:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMAXNUM_IEEE4]](s32)
; GFX7-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC]](s16)
; GFX7-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC1]](s16)
; GFX7-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC2]](s16)
; GFX7-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC3]](s16)
; GFX7-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC4]](s16)
; GFX7-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX7-NEXT: $vgpr1 = COPY [[ANYEXT1]](s32)
; GFX7-NEXT: $vgpr2 = COPY [[ANYEXT2]](s32)
; GFX7-NEXT: $vgpr3 = COPY [[ANYEXT3]](s32)
; GFX7-NEXT: $vgpr4 = COPY [[ANYEXT4]](s32)
; GFX7-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4
%2:_(s32) = COPY $vgpr0
%3:_(s32) = COPY $vgpr1
%4:_(s32) = COPY $vgpr2
%5:_(s32) = COPY $vgpr3
%6:_(s32) = COPY $vgpr4
%7:_(<5 x s32>) = G_BUILD_VECTOR %2(s32), %3(s32), %4(s32), %5(s32), %6(s32)
%0:_(<5 x s16>) = G_TRUNC %7(<5 x s32>)
%8:_(s32) = COPY $vgpr5
%9:_(s32) = COPY $vgpr6
%10:_(s32) = COPY $vgpr7
%11:_(s32) = COPY $vgpr8
%12:_(s32) = COPY $vgpr9
%13:_(<5 x s32>) = G_BUILD_VECTOR %8(s32), %9(s32), %10(s32), %11(s32), %12(s32)
%1:_(<5 x s16>) = G_TRUNC %13(<5 x s32>)
%15:_(<5 x s16>) = G_FMAXNUM %0, %1
%21:_(s16), %22:_(s16), %23:_(s16), %24:_(s16), %25:_(s16) = G_UNMERGE_VALUES %15(<5 x s16>)
%16:_(s32) = G_ANYEXT %21(s16)
%17:_(s32) = G_ANYEXT %22(s16)
%18:_(s32) = G_ANYEXT %23(s16)
%19:_(s32) = G_ANYEXT %24(s16)
%20:_(s32) = G_ANYEXT %25(s16)
$vgpr0 = COPY %16(s32)
$vgpr1 = COPY %17(s32)
$vgpr2 = COPY %18(s32)
$vgpr3 = COPY %19(s32)
$vgpr4 = COPY %20(s32)
SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4
...