# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -run-pass=legalizer %s -o - | FileCheck %s
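# These tests exercise legalization of G_FPEXT on gfx8 (fiji): scalar and
# vector f16 -> f32 and f32 -> f64 extensions. Per the autogenerated CHECK
# lines below, vector cases are scalarized with G_UNMERGE_VALUES and the
# results reassembled with G_BUILD_VECTOR.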
---
name: test_fpext_f16_to_f32
body: |
  bb.0:
    liveins: $vgpr0

    ; CHECK-LABEL: name: test_fpext_f16_to_f32
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
    ; CHECK: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
    ; CHECK: $vgpr0 = COPY [[FPEXT]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s16) = G_TRUNC %0
    %2:_(s32) = G_FPEXT %1
    $vgpr0 = COPY %2
...
---
name: test_fpext_v2f16_to_v2f32
body: |
  bb.0:
    liveins: $vgpr0

    ; CHECK-LABEL: name: test_fpext_v2f16_to_v2f32
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
    ; CHECK: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
    ; CHECK: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
    ; CHECK: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FPEXT]](s32), [[FPEXT1]](s32)
    ; CHECK: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
    %0:_(<2 x s16>) = COPY $vgpr0
    %1:_(<2 x s32>) = G_FPEXT %0
    $vgpr0_vgpr1 = COPY %1
...
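# The _w_flags variant below checks that fast-math flags (nnan) on the vector
# G_FPEXT are preserved on the scalarized G_FPEXT instructions.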
---
name: test_fpext_v2f16_to_v2f32_w_flags
body: |
  bb.0:
    liveins: $vgpr0

    ; CHECK-LABEL: name: test_fpext_v2f16_to_v2f32_w_flags
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
    ; CHECK: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
    ; CHECK: %4:_(s32) = nnan G_FPEXT [[UV]](s16)
    ; CHECK: %5:_(s32) = nnan G_FPEXT [[UV1]](s16)
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR %4(s32), %5(s32)
    ; CHECK: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
    %0:_(<2 x s16>) = COPY $vgpr0
    %1:_(<2 x s32>) = nnan G_FPEXT %0
    $vgpr0_vgpr1 = COPY %1
...
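# For the <3 x s16> source, the CHECK lines show the operand being built from a
# <4 x s16> G_IMPLICIT_DEF via a <3 x s16> G_EXTRACT before scalarization.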
---
name: test_fpext_v3f16_to_v3f32
body: |
  bb.0:
    liveins: $vgpr0

    ; CHECK-LABEL: name: test_fpext_v3f16_to_v3f32
    ; CHECK: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
    ; CHECK: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
    ; CHECK: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[EXTRACT]](<3 x s16>)
    ; CHECK: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
    ; CHECK: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
    ; CHECK: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[FPEXT]](s32), [[FPEXT1]](s32), [[FPEXT2]](s32)
    ; CHECK: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
    %0:_(<3 x s16>) = G_IMPLICIT_DEF
    %1:_(<3 x s32>) = G_FPEXT %0
    $vgpr0_vgpr1_vgpr2 = COPY %1
...
---
name: test_fpext_v4f16_to_v4f32
body: |
  bb.0:
    liveins: $vgpr0

    ; CHECK-LABEL: name: test_fpext_v4f16_to_v4f32
    ; CHECK: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
    ; CHECK: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
    ; CHECK: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
    ; CHECK: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
    ; CHECK: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
    ; CHECK: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[FPEXT]](s32), [[FPEXT1]](s32), [[FPEXT2]](s32), [[FPEXT3]](s32)
    ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>)
    %0:_(<4 x s16>) = G_IMPLICIT_DEF
    %1:_(<4 x s32>) = G_FPEXT %0
    $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %1
...
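# Scalar f32 -> f64 extension is kept as a single G_FPEXT per the CHECK lines;
# the vector f32 -> f64 cases that follow are scalarized like the f16 ones above.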
---
name: test_fpext_f32_to_f64
body: |
  bb.0:
    liveins: $vgpr0

    ; CHECK-LABEL: name: test_fpext_f32_to_f64
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; CHECK: [[FPEXT:%[0-9]+]]:_(s64) = G_FPEXT [[COPY]](s32)
    ; CHECK: $vgpr0_vgpr1 = COPY [[FPEXT]](s64)
    %0:_(s32) = COPY $vgpr0
    %1:_(s64) = G_FPEXT %0
    $vgpr0_vgpr1 = COPY %1
...
---
name: test_fpext_v2f32_to_v2f64
body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; CHECK-LABEL: name: test_fpext_v2f32_to_v2f64
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
    ; CHECK: [[FPEXT:%[0-9]+]]:_(s64) = G_FPEXT [[UV]](s32)
    ; CHECK: [[FPEXT1:%[0-9]+]]:_(s64) = G_FPEXT [[UV1]](s32)
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[FPEXT]](s64), [[FPEXT1]](s64)
    ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
    %1:_(<2 x s64>) = G_FPEXT %0
    $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %1
...
---
name: test_fpext_v3f32_to_v3f64
body: |
  bb.0:
    liveins: $vgpr0_vgpr1_vgpr2

    ; CHECK-LABEL: name: test_fpext_v3f32_to_v3f64
    ; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<3 x s32>)
    ; CHECK: [[FPEXT:%[0-9]+]]:_(s64) = G_FPEXT [[UV]](s32)
    ; CHECK: [[FPEXT1:%[0-9]+]]:_(s64) = G_FPEXT [[UV1]](s32)
    ; CHECK: [[FPEXT2:%[0-9]+]]:_(s64) = G_FPEXT [[UV2]](s32)
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[FPEXT]](s64), [[FPEXT1]](s64), [[FPEXT2]](s64)
    ; CHECK: S_NOP 0, implicit [[BUILD_VECTOR]](<3 x s64>)
    %0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
    %1:_(<3 x s64>) = G_FPEXT %0
    S_NOP 0, implicit %1
...
---
name: test_fpext_v4f32_to_v4f64
body: |
  bb.0:
    liveins: $vgpr0_vgpr1_vgpr2_vgpr3

    ; CHECK-LABEL: name: test_fpext_v4f32_to_v4f64
    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
    ; CHECK: [[FPEXT:%[0-9]+]]:_(s64) = G_FPEXT [[UV]](s32)
    ; CHECK: [[FPEXT1:%[0-9]+]]:_(s64) = G_FPEXT [[UV1]](s32)
    ; CHECK: [[FPEXT2:%[0-9]+]]:_(s64) = G_FPEXT [[UV2]](s32)
    ; CHECK: [[FPEXT3:%[0-9]+]]:_(s64) = G_FPEXT [[UV3]](s32)
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[FPEXT]](s64), [[FPEXT1]](s64), [[FPEXT2]](s64), [[FPEXT3]](s64)
    ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<4 x s64>)
    %0:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
    %1:_(<4 x s64>) = G_FPEXT %0
    $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %1
...