llvm-project/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-extract-vector-elt...

2257 lines
117 KiB
YAML

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -run-pass=legalizer -global-isel-abort=0 %s -o - | FileCheck %s
---
# Constant index 0 into <2 x s32>: the legalizer rewrites the extract as a
# G_EXTRACT at bit offset 0.
name: extract_vector_elt_0_v2i32
body: |
bb.0:
liveins: $vgpr0_vgpr1
; CHECK-LABEL: name: extract_vector_elt_0_v2i32
; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY]](<2 x s32>), 0
; CHECK: $vgpr0 = COPY [[EXTRACT]](s32)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(s32) = G_CONSTANT i32 0
%2:_(s32) = G_EXTRACT_VECTOR_ELT %0, %1
$vgpr0 = COPY %2
...
---
# Constant index 1 into <2 x s32>: the legalizer rewrites the extract as a
# G_EXTRACT at bit offset 32.
name: extract_vector_elt_1_v2i32
body: |
bb.0:
liveins: $vgpr0_vgpr1
; CHECK-LABEL: name: extract_vector_elt_1_v2i32
; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY]](<2 x s32>), 32
; CHECK: $vgpr0 = COPY [[EXTRACT]](s32)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(s32) = G_CONSTANT i32 1
%2:_(s32) = G_EXTRACT_VECTOR_ELT %0, %1
$vgpr0 = COPY %2
...
---
# NOTE(review): the test name says element 2 (out of range for <2 x s32>),
# but the body extracts element 1 (G_CONSTANT i32 1), making this an exact
# duplicate of extract_vector_elt_1_v2i32 above. Confirm whether
# `G_CONSTANT i32 2` was intended; if so, regenerate the CHECK lines with
# update_mir_test_checks.py, since the out-of-range legalization differs.
name: extract_vector_elt_2_v2i32
body: |
bb.0:
liveins: $vgpr0_vgpr1
; CHECK-LABEL: name: extract_vector_elt_2_v2i32
; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY]](<2 x s32>), 32
; CHECK: $vgpr0 = COPY [[EXTRACT]](s32)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(s32) = G_CONSTANT i32 1
%2:_(s32) = G_EXTRACT_VECTOR_ELT %0, %1
$vgpr0 = COPY %2
...
---
# Constant index 0 into <3 x s32>: legalized to a G_EXTRACT at bit offset 0.
name: extract_vector_elt_0_v3i32
body: |
bb.0:
liveins: $vgpr0_vgpr1_vgpr2
; CHECK-LABEL: name: extract_vector_elt_0_v3i32
; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY]](<3 x s32>), 0
; CHECK: $vgpr0 = COPY [[EXTRACT]](s32)
%0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
%1:_(s32) = G_CONSTANT i32 0
%2:_(s32) = G_EXTRACT_VECTOR_ELT %0, %1
$vgpr0 = COPY %2
...
---
# Constant index 0 into <4 x s32>: legalized to a G_EXTRACT at bit offset 0.
name: extract_vector_elt_0_v4i32
body: |
bb.0:
liveins: $vgpr0_vgpr1_vgpr2_vgpr3
; CHECK-LABEL: name: extract_vector_elt_0_v4i32
; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY]](<4 x s32>), 0
; CHECK: $vgpr0 = COPY [[EXTRACT]](s32)
%0:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%1:_(s32) = G_CONSTANT i32 0
%2:_(s32) = G_EXTRACT_VECTOR_ELT %0, %1
$vgpr0 = COPY %2
...
---
# <5 x s32> built by splatting $vgpr0: extracting element 0 folds away to a
# plain copy of the scalar source, with no vector ops left in the output.
name: extract_vector_elt_0_v5i32
body: |
bb.0:
liveins: $vgpr0
; CHECK-LABEL: name: extract_vector_elt_0_v5i32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK: $vgpr0 = COPY [[COPY1]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(<5 x s32>) = G_BUILD_VECTOR %0, %0, %0, %0, %0
%2:_(s32) = G_CONSTANT i32 0
%3:_(s32) = G_EXTRACT_VECTOR_ELT %1, %2
$vgpr0 = COPY %3
...
---
# Same as the v5 case with a <6 x s32> splat: the element-0 extract folds to
# a copy of the scalar.
name: extract_vector_elt_0_v6i32
body: |
bb.0:
liveins: $vgpr0
; CHECK-LABEL: name: extract_vector_elt_0_v6i32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK: $vgpr0 = COPY [[COPY1]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(<6 x s32>) = G_BUILD_VECTOR %0, %0, %0, %0, %0, %0
%2:_(s32) = G_CONSTANT i32 0
%3:_(s32) = G_EXTRACT_VECTOR_ELT %1, %2
$vgpr0 = COPY %3
...
---
# Same pattern with a <7 x s32> splat: the element-0 extract folds to a copy
# of the scalar.
name: extract_vector_elt_0_v7i32
body: |
bb.0:
liveins: $vgpr0
; CHECK-LABEL: name: extract_vector_elt_0_v7i32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK: $vgpr0 = COPY [[COPY1]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(<7 x s32>) = G_BUILD_VECTOR %0, %0, %0, %0, %0, %0, %0
%2:_(s32) = G_CONSTANT i32 0
%3:_(s32) = G_EXTRACT_VECTOR_ELT %1, %2
$vgpr0 = COPY %3
...
---
# Same pattern with an <8 x s32> splat: the element-0 extract folds to a copy
# of the scalar.
name: extract_vector_elt_0_v8i32
body: |
bb.0:
liveins: $vgpr0
; CHECK-LABEL: name: extract_vector_elt_0_v8i32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK: $vgpr0 = COPY [[COPY1]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(<8 x s32>) = G_BUILD_VECTOR %0, %0, %0, %0, %0, %0, %0, %0
%2:_(s32) = G_CONSTANT i32 0
%3:_(s32) = G_EXTRACT_VECTOR_ELT %1, %2
$vgpr0 = COPY %3
...
---
# Same pattern with a <16 x s32> splat: the element-0 extract folds to a copy
# of the scalar.
name: extract_vector_elt_0_v16i32
body: |
bb.0:
liveins: $vgpr0
; CHECK-LABEL: name: extract_vector_elt_0_v16i32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK: $vgpr0 = COPY [[COPY1]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(<16 x s32>) = G_BUILD_VECTOR %0, %0, %0, %0, %0, %0, %0, %0, %0, %0, %0, %0, %0, %0, %0, %0
%2:_(s32) = G_CONSTANT i32 0
%3:_(s32) = G_EXTRACT_VECTOR_ELT %1, %2
$vgpr0 = COPY %3
...
---
# Variable (register) index into <2 x s32>: G_EXTRACT_VECTOR_ELT is kept
# as-is by the legalizer.
name: extract_vector_elt_var_v2i32
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2
; CHECK-LABEL: name: extract_vector_elt_var_v2i32
; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<2 x s32>), [[COPY1]](s32)
; CHECK: $vgpr0 = COPY [[EVEC]](s32)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(s32) = COPY $vgpr2
%2:_(s32) = G_EXTRACT_VECTOR_ELT %0, %1
$vgpr0 = COPY %2
...
---
# Variable index into <8 x s32>: also legal, kept unchanged.
name: extract_vector_elt_var_v8i32
body: |
bb.0:
liveins: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
; CHECK-LABEL: name: extract_vector_elt_var_v8i32
; CHECK: [[COPY:%[0-9]+]]:_(<8 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<8 x s32>), [[COPY1]](s32)
; CHECK: $vgpr0 = COPY [[EVEC]](s32)
%0:_(<8 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
%1:_(s32) = COPY $vgpr2
%2:_(s32) = G_EXTRACT_VECTOR_ELT %0, %1
$vgpr0 = COPY %2
...
---
# Illegal element type: <2 x s8> is widened to <2 x s32> (each lane
# G_SEXT_INREG from 8 bits) before the constant-index extract at offset 0.
name: extract_vector_elt_0_v2i8_i32
body: |
bb.0:
; CHECK-LABEL: name: extract_vector_elt_0_v2i8_i32
; CHECK: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY [[DEF]](<2 x s32>)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV]], 8
; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV1]], 8
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SEXT_INREG]](s32), [[SEXT_INREG1]](s32)
; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[BUILD_VECTOR]](<2 x s32>), 0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[EXTRACT]](s32)
; CHECK: $vgpr0 = COPY [[COPY1]](s32)
%0:_(<2 x s8>) = G_IMPLICIT_DEF
%1:_(s32) = G_CONSTANT i32 0
%2:_(s8) = G_EXTRACT_VECTOR_ELT %0, %1
%3:_(s32) = G_ANYEXT %2
$vgpr0 = COPY %3
...
---
# <2 x s16> with constant index 0: lowered via a bitcast to s32 and a right
# shift by the element's bit offset (0 here).
name: extract_vector_elt_0_v2i16_i32
body: |
bb.0:
; CHECK-LABEL: name: extract_vector_elt_0_v2i16_i32
; CHECK: [[DEF:%[0-9]+]]:_(<2 x s16>) = G_IMPLICIT_DEF
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[DEF]](<2 x s16>)
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: $vgpr0 = COPY [[COPY]](s32)
%0:_(<2 x s16>) = G_IMPLICIT_DEF
%1:_(s32) = G_CONSTANT i32 0
%2:_(s16) = G_EXTRACT_VECTOR_ELT %0, %1
%3:_(s32) = G_ANYEXT %2
$vgpr0 = COPY %3
...
---
# <2 x s1> with an s32 index: lanes are widened to s32 (G_SEXT_INREG from
# 1 bit) and the constant-index extract becomes a G_EXTRACT at offset 0.
name: extract_vector_elt_0_v2i1_i32
body: |
bb.0:
; CHECK-LABEL: name: extract_vector_elt_0_v2i1_i32
; CHECK: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY [[DEF]](<2 x s32>)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV]], 1
; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV1]], 1
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SEXT_INREG]](s32), [[SEXT_INREG1]](s32)
; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[BUILD_VECTOR]](<2 x s32>), 0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[EXTRACT]](s32)
; CHECK: $vgpr0 = COPY [[COPY1]](s32)
%0:_(<2 x s1>) = G_IMPLICIT_DEF
%1:_(s32) = G_CONSTANT i32 0
%2:_(s1) = G_EXTRACT_VECTOR_ELT %0, %1
%3:_(s32) = G_ANYEXT %2
$vgpr0 = COPY %3
...
---
# As above but the index itself is s1 (false == element 0); the s1 constant
# survives in the output and the extract is still resolved at offset 0.
name: extract_vector_elt_0_v2i1_i1
body: |
bb.0:
; CHECK-LABEL: name: extract_vector_elt_0_v2i1_i1
; CHECK: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
; CHECK: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY [[DEF]](<2 x s32>)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV]], 1
; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV1]], 1
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SEXT_INREG]](s32), [[SEXT_INREG1]](s32)
; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[BUILD_VECTOR]](<2 x s32>), 0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[EXTRACT]](s32)
; CHECK: $vgpr0 = COPY [[COPY1]](s32)
%0:_(<2 x s1>) = G_IMPLICIT_DEF
%1:_(s1) = G_CONSTANT i1 false
%2:_(s1) = G_EXTRACT_VECTOR_ELT %0, %1
%3:_(s32) = G_ANYEXT %2
$vgpr0 = COPY %3
...
---
# <2 x s8> (bitcast from s16) with a variable index: the bytes are unpacked
# with shifts, sign-extended in-register to 8 bits in s32 lanes, and the
# lane is selected with a dynamic G_EXTRACT_VECTOR_ELT on <2 x s32>.
name: extract_vector_elt_v2s8_varidx_i32
body: |
bb.0:
liveins: $vgpr0, $vgpr1
; CHECK-LABEL: name: extract_vector_elt_v2s8_varidx_i32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY4]], 8
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[COPY3]](s32)
; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY5]], 8
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SEXT_INREG]](s32), [[SEXT_INREG1]](s32)
; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[BUILD_VECTOR]](<2 x s32>), [[COPY1]](s32)
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY [[EVEC]](s32)
; CHECK: $vgpr0 = COPY [[COPY6]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s16) = G_TRUNC %0
%3:_(<2 x s8>) = G_BITCAST %2
%4:_(s8) = G_EXTRACT_VECTOR_ELT %3, %1
%5:_(s32) = G_ANYEXT %4
$vgpr0 = COPY %5
...
---
# Same unpack/widen pattern as the varidx test, but with constant index 0 the
# extract resolves to a G_EXTRACT at bit offset 0.
# NOTE(review): the CHECK lines expect a COPY of $vgpr1 even though liveins
# only lists $vgpr0 and the value is never used — presumably leftover from
# the varidx variant; confirm against regenerated output.
name: extract_vector_elt_v2s8_constidx_0_i32
body: |
bb.0:
liveins: $vgpr0
; CHECK-LABEL: name: extract_vector_elt_v2s8_constidx_0_i32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY4]], 8
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[COPY3]](s32)
; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY5]], 8
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SEXT_INREG]](s32), [[SEXT_INREG1]](s32)
; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[BUILD_VECTOR]](<2 x s32>), 0
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY [[EXTRACT]](s32)
; CHECK: $vgpr0 = COPY [[COPY6]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s16) = G_TRUNC %0
%3:_(<2 x s8>) = G_BITCAST %2
%4:_(s32) = G_CONSTANT i32 0
%5:_(s8) = G_EXTRACT_VECTOR_ELT %3, %4
%6:_(s32) = G_ANYEXT %5
$vgpr0 = COPY %6
...
---
# Constant index 1 on the widened <2 x s32>: G_EXTRACT at bit offset 32.
# NOTE(review): as in the constidx_0 test, $vgpr1 is copied but not listed in
# liveins and is unused — confirm.
name: extract_vector_elt_v2s8_constidx_1_i32
body: |
bb.0:
liveins: $vgpr0
; CHECK-LABEL: name: extract_vector_elt_v2s8_constidx_1_i32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY4]], 8
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[COPY3]](s32)
; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY5]], 8
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SEXT_INREG]](s32), [[SEXT_INREG1]](s32)
; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[BUILD_VECTOR]](<2 x s32>), 32
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY [[EXTRACT]](s32)
; CHECK: $vgpr0 = COPY [[COPY6]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s16) = G_TRUNC %0
%3:_(<2 x s8>) = G_BITCAST %2
%4:_(s32) = G_CONSTANT i32 1
%5:_(s8) = G_EXTRACT_VECTOR_ELT %3, %4
%6:_(s32) = G_ANYEXT %5
$vgpr0 = COPY %6
...
---
# <4 x s4> (bitcast from s16) with a variable index: nibbles are unpacked
# with shifts at 4-bit offsets, each sign-extended in-register to 4 bits in
# an s32 lane, then selected with a dynamic G_EXTRACT_VECTOR_ELT.
name: extract_vector_elt_v4s4_varidx_i32
body: |
bb.0:
liveins: $vgpr0, $vgpr1
; CHECK-LABEL: name: extract_vector_elt_v4s4_varidx_i32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; CHECK: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C3]](s32)
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
; CHECK: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C4]](s32)
; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C5]](s32)
; CHECK: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
; CHECK: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C6]](s32)
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY6]], 4
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY [[COPY3]](s32)
; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY7]], 4
; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY [[COPY4]](s32)
; CHECK: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY8]], 4
; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY [[COPY5]](s32)
; CHECK: [[SEXT_INREG3:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY9]], 4
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[SEXT_INREG]](s32), [[SEXT_INREG1]](s32), [[SEXT_INREG2]](s32), [[SEXT_INREG3]](s32)
; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[BUILD_VECTOR]](<4 x s32>), [[COPY1]](s32)
; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY [[EVEC]](s32)
; CHECK: $vgpr0 = COPY [[COPY10]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s16) = G_TRUNC %0
%3:_(<4 x s4>) = G_BITCAST %2
%4:_(s4) = G_EXTRACT_VECTOR_ELT %3, %1
%5:_(s32) = G_ANYEXT %4
$vgpr0 = COPY %5
...
---
# <3 x s8> (trunc of <3 x s32>) with a variable index: each s32 lane is
# sign-extended in-register to 8 bits and the extract stays dynamic on the
# widened <3 x s32>.
name: extract_vector_elt_v3s8_varidx_i32
body: |
bb.0:
liveins: $vgpr0_vgpr1_vgpr2, $vgpr3
; CHECK-LABEL: name: extract_vector_elt_v3s8_varidx_i32
; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr3
; CHECK: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY [[COPY]](<3 x s32>)
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV]], 8
; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV1]], 8
; CHECK: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV2]], 8
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[SEXT_INREG]](s32), [[SEXT_INREG1]](s32), [[SEXT_INREG2]](s32)
; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[BUILD_VECTOR]](<3 x s32>), [[COPY1]](s32)
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[EVEC]](s32)
; CHECK: $vgpr0 = COPY [[COPY3]](s32)
%0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
%1:_(s32) = COPY $vgpr3
%2:_(<3 x s8>) = G_TRUNC %0
%3:_(s8) = G_EXTRACT_VECTOR_ELT %2, %1
%4:_(s32) = G_ANYEXT %3
$vgpr0 = COPY %4
...
---
# <4 x s8> fits in one s32: the bytes are re-packed into a scalar and the
# dynamic extract becomes a right shift by (idx & 3) * 8 bits.
name: extract_vector_elt_v4s8_varidx_i32
body: |
bb.0:
liveins: $vgpr0, $vgpr1
; CHECK-LABEL: name: extract_vector_elt_v4s8_varidx_i32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C3]]
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C]](s32)
; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C3]]
; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C1]](s32)
; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]]
; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C2]](s32)
; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]]
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
; CHECK: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C4]]
; CHECK: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND4]], [[C4]](s32)
; CHECK: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[OR2]], [[SHL3]](s32)
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
; CHECK: $vgpr0 = COPY [[COPY6]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(<4 x s8>) = G_BITCAST %0
%3:_(s8) = G_EXTRACT_VECTOR_ELT %2, %1
%4:_(s32) = G_ANYEXT %3
$vgpr0 = COPY %4
...
---
# Packed <4 x s8> with constant index 0: bytes are re-packed into an s32 and
# the element is a right shift by 0 bits.
name: extract_vector_elt_v4s8_constidx_0_i32
body: |
bb.0:
liveins: $vgpr0
; CHECK-LABEL: name: extract_vector_elt_v4s8_constidx_0_i32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C4]]
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C4]]
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C]](s32)
; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C4]]
; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C1]](s32)
; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C4]]
; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C2]](s32)
; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]]
; CHECK: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[OR2]], [[C3]](s32)
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
; CHECK: $vgpr0 = COPY [[COPY5]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(<4 x s8>) = G_BITCAST %0
%2:_(s32) = G_CONSTANT i32 0
%3:_(s8) = G_EXTRACT_VECTOR_ELT %1, %2
%4:_(s32) = G_ANYEXT %3
$vgpr0 = COPY %4
...
---
# Packed <4 x s8> with constant index 1: the element is a right shift of the
# re-packed s32 by 8 bits.
name: extract_vector_elt_v4s8_constidx_1_i32
body: |
bb.0:
liveins: $vgpr0
; CHECK-LABEL: name: extract_vector_elt_v4s8_constidx_1_i32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]]
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C]](s32)
; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C3]]
; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C1]](s32)
; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C3]]
; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C2]](s32)
; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]]
; CHECK: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[OR2]], [[C]](s32)
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
; CHECK: $vgpr0 = COPY [[COPY5]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(<4 x s8>) = G_BITCAST %0
%2:_(s32) = G_CONSTANT i32 1
%3:_(s8) = G_EXTRACT_VECTOR_ELT %1, %2
%4:_(s32) = G_ANYEXT %3
$vgpr0 = COPY %4
...
---
# Packed <4 x s8> with constant index 2: the element is a right shift of the
# re-packed s32 by 16 bits.
name: extract_vector_elt_v4s8_constidx_2_i32
body: |
bb.0:
liveins: $vgpr0
; CHECK-LABEL: name: extract_vector_elt_v4s8_constidx_2_i32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]]
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C]](s32)
; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C3]]
; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C1]](s32)
; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C3]]
; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C2]](s32)
; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]]
; CHECK: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[OR2]], [[C1]](s32)
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
; CHECK: $vgpr0 = COPY [[COPY5]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(<4 x s8>) = G_BITCAST %0
%2:_(s32) = G_CONSTANT i32 2
%3:_(s8) = G_EXTRACT_VECTOR_ELT %1, %2
%4:_(s32) = G_ANYEXT %3
$vgpr0 = COPY %4
...
---
# Packed <4 x s8> with constant index 3: the element is a right shift of the
# re-packed s32 by 24 bits.
name: extract_vector_elt_v4s8_constidx_3_i32
body: |
bb.0:
liveins: $vgpr0
; CHECK-LABEL: name: extract_vector_elt_v4s8_constidx_3_i32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]]
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C]](s32)
; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C3]]
; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C1]](s32)
; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C3]]
; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C2]](s32)
; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]]
; CHECK: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[OR2]], [[C2]](s32)
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
; CHECK: $vgpr0 = COPY [[COPY5]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(<4 x s8>) = G_BITCAST %0
%2:_(s32) = G_CONSTANT i32 3
%3:_(s8) = G_EXTRACT_VECTOR_ELT %1, %2
%4:_(s32) = G_ANYEXT %3
$vgpr0 = COPY %4
...
---
# <8 x s8> in an s64 with a variable index: the bytes are re-packed into a
# <2 x s32>, the 32-bit word is chosen by a dynamic extract on idx >> 2, and
# the byte within the word by a shift of (idx & 3) * 8 bits.
name: extract_vector_elt_v8s8_varidx_i32
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2
; CHECK-LABEL: name: extract_vector_elt_v8s8_varidx_i32
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV]], [[C]](s32)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; CHECK: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32)
; CHECK: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
; CHECK: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; CHECK: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C1]](s16)
; CHECK: [[LSHR3:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C1]](s16)
; CHECK: [[LSHR4:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C1]](s16)
; CHECK: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC3]], [[C1]](s16)
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C2]]
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C2]]
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C3]](s32)
; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C2]]
; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C]](s32)
; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR3]](s16)
; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C2]]
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C4]](s32)
; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]]
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
; CHECK: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C2]]
; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR4]](s16)
; CHECK: [[AND5:%[0-9]+]]:_(s32) = G_AND [[ANYEXT2]], [[C2]]
; CHECK: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[C3]](s32)
; CHECK: [[OR3:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL3]]
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
; CHECK: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C2]]
; CHECK: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[AND6]], [[C]](s32)
; CHECK: [[OR4:%[0-9]+]]:_(s32) = G_OR [[OR3]], [[SHL4]]
; CHECK: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR5]](s16)
; CHECK: [[AND7:%[0-9]+]]:_(s32) = G_AND [[ANYEXT3]], [[C2]]
; CHECK: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C4]](s32)
; CHECK: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]]
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32)
; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; CHECK: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C5]](s32)
; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[BUILD_VECTOR]](<2 x s32>), [[LSHR6]](s32)
; CHECK: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
; CHECK: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C6]]
; CHECK: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[AND8]], [[C6]](s32)
; CHECK: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[EVEC]], [[SHL6]](s32)
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32)
; CHECK: $vgpr0 = COPY [[COPY6]](s32)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s32) = COPY $vgpr2
%2:_(<8 x s8>) = G_BITCAST %0
%3:_(s8) = G_EXTRACT_VECTOR_ELT %2, %1
%4:_(s32) = G_ANYEXT %3
$vgpr0 = COPY %4
...
---
# <8 x s8> in an s64 with constant index 0: only the low 32-bit word is
# re-packed and the element is a right shift by 0 bits.
name: extract_vector_elt_v8s8_constidx_0_i32
body: |
bb.0:
liveins: $vgpr0_vgpr1
; CHECK-LABEL: name: extract_vector_elt_v8s8_constidx_0_i32
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV]], [[C1]](s32)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; CHECK: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; CHECK: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C2]](s16)
; CHECK: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C2]](s16)
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C3]]
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C4]](s32)
; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C1]](s32)
; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C3]]
; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C5]](s32)
; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]]
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
; CHECK: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C]](s32)
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
; CHECK: $vgpr0 = COPY [[COPY4]](s32)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s32) = G_CONSTANT i32 0
%2:_(<8 x s8>) = G_BITCAST %0
%3:_(s8) = G_EXTRACT_VECTOR_ELT %2, %1
%4:_(s32) = G_ANYEXT %3
$vgpr0 = COPY %4
...
---
# <8 x s8> in an s64 with constant index 1: the low word is re-packed and the
# element is a right shift by 8 bits.
name: extract_vector_elt_v8s8_constidx_1_i32
body: |
bb.0:
liveins: $vgpr0_vgpr1
; CHECK-LABEL: name: extract_vector_elt_v8s8_constidx_1_i32
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV]], [[C]](s32)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; CHECK: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; CHECK: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C1]](s16)
; CHECK: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C1]](s16)
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]]
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C2]]
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C3]](s32)
; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C2]]
; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C]](s32)
; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C2]]
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C4]](s32)
; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]]
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
; CHECK: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C3]](s32)
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
; CHECK: $vgpr0 = COPY [[COPY4]](s32)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s32) = G_CONSTANT i32 1
%2:_(<8 x s8>) = G_BITCAST %0
%3:_(s8) = G_EXTRACT_VECTOR_ELT %2, %1
%4:_(s32) = G_ANYEXT %3
$vgpr0 = COPY %4
...
---
# Constant index 3: same low-dword repack as index 1, but the final shift
# amount is 24 to select byte 3.
name: extract_vector_elt_v8s8_constidx_3_i32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1
    ; CHECK-LABEL: name: extract_vector_elt_v8s8_constidx_3_i32
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
    ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV]], [[C]](s32)
    ; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
    ; CHECK: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
    ; CHECK: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C1]](s16)
    ; CHECK: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C1]](s16)
    ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]]
    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
    ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C2]]
    ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C3]](s32)
    ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
    ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C2]]
    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C]](s32)
    ; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
    ; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C2]]
    ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
    ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C4]](s32)
    ; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]]
    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
    ; CHECK: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C4]](s32)
    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
    ; CHECK: $vgpr0 = COPY [[COPY4]](s32)
    %0:_(s64) = COPY $vgpr0_vgpr1
    %1:_(s32) = G_CONSTANT i32 3
    %2:_(<8 x s8>) = G_BITCAST %0
    %3:_(s8) = G_EXTRACT_VECTOR_ELT %2, %1
    %4:_(s32) = G_ANYEXT %3
    $vgpr0 = COPY %4
...
---
# Constant index 4: the byte now comes from the high dword (UV1); the final
# shift amount folds to 0.
name: extract_vector_elt_v8s8_constidx_4_i32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1
    ; CHECK-LABEL: name: extract_vector_elt_v8s8_constidx_4_i32
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
    ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32)
    ; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
    ; CHECK: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
    ; CHECK: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C1]](s16)
    ; CHECK: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C1]](s16)
    ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]]
    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
    ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C2]]
    ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C3]](s32)
    ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
    ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C2]]
    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C]](s32)
    ; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
    ; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C2]]
    ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
    ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C4]](s32)
    ; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]]
    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
    ; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C5]](s32)
    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
    ; CHECK: $vgpr0 = COPY [[COPY4]](s32)
    %0:_(s64) = COPY $vgpr0_vgpr1
    %1:_(s32) = G_CONSTANT i32 4
    %2:_(<8 x s8>) = G_BITCAST %0
    %3:_(s8) = G_EXTRACT_VECTOR_ELT %2, %1
    %4:_(s32) = G_ANYEXT %3
    $vgpr0 = COPY %4
...
---
# Constant index 5: high dword (UV1) repacked, final shift of 8 selects byte 5.
name: extract_vector_elt_v8s8_constidx_5_i32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1
    ; CHECK-LABEL: name: extract_vector_elt_v8s8_constidx_5_i32
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
    ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32)
    ; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
    ; CHECK: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
    ; CHECK: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C1]](s16)
    ; CHECK: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C1]](s16)
    ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]]
    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
    ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C2]]
    ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C3]](s32)
    ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
    ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C2]]
    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C]](s32)
    ; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
    ; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C2]]
    ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
    ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C4]](s32)
    ; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]]
    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
    ; CHECK: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C3]](s32)
    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
    ; CHECK: $vgpr0 = COPY [[COPY4]](s32)
    %0:_(s64) = COPY $vgpr0_vgpr1
    %1:_(s32) = G_CONSTANT i32 5
    %2:_(<8 x s8>) = G_BITCAST %0
    %3:_(s8) = G_EXTRACT_VECTOR_ELT %2, %1
    %4:_(s32) = G_ANYEXT %3
    $vgpr0 = COPY %4
...
---
# Constant index 7: high dword (UV1) repacked, final shift of 24 selects the
# top byte.
name: extract_vector_elt_v8s8_constidx_7_i32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1
    ; CHECK-LABEL: name: extract_vector_elt_v8s8_constidx_7_i32
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
    ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32)
    ; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
    ; CHECK: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
    ; CHECK: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[C1]](s16)
    ; CHECK: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C1]](s16)
    ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]]
    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
    ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C2]]
    ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C3]](s32)
    ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
    ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C2]]
    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C]](s32)
    ; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR2]](s16)
    ; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C2]]
    ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
    ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C4]](s32)
    ; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]]
    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
    ; CHECK: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY3]], [[C4]](s32)
    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
    ; CHECK: $vgpr0 = COPY [[COPY4]](s32)
    %0:_(s64) = COPY $vgpr0_vgpr1
    %1:_(s32) = G_CONSTANT i32 7
    %2:_(<8 x s8>) = G_BITCAST %0
    %3:_(s8) = G_EXTRACT_VECTOR_ELT %2, %1
    %4:_(s32) = G_ANYEXT %3
    $vgpr0 = COPY %4
...
---
# Variable index into <2 x s16>: bitcast to s32, then shift right by
# (idx & 1) << 4, i.e. 0 or 16 bits.
name: extract_vector_elt_v2s16_varidx_i32
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1
    ; CHECK-LABEL: name: extract_vector_elt_v2s16_varidx_i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; CHECK: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND]], [[C1]](s32)
    ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[SHL]](s32)
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
    ; CHECK: $vgpr0 = COPY [[COPY2]](s32)
    %0:_(<2 x s16>) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s16) = G_EXTRACT_VECTOR_ELT %0, %1
    %3:_(s32) = G_ANYEXT %2
    $vgpr0 = COPY %3
...
---
# Constant index 0 into <2 x s16>: the shift amount is the constant 0.
name: extract_vector_elt_v2s16_idx0_i32
body: |
  bb.0:
    liveins: $vgpr0
    ; CHECK-LABEL: name: extract_vector_elt_v2s16_idx0_i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
    ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
    ; CHECK: $vgpr0 = COPY [[COPY1]](s32)
    %0:_(<2 x s16>) = COPY $vgpr0
    %1:_(s32) = G_CONSTANT i32 0
    %2:_(s16) = G_EXTRACT_VECTOR_ELT %0, %1
    %3:_(s32) = G_ANYEXT %2
    $vgpr0 = COPY %3
...
---
# Constant index 1 into <2 x s16>: the high half, via a shift of 16.
name: extract_vector_elt_v2s16_idx1_i32
body: |
  bb.0:
    liveins: $vgpr0
    ; CHECK-LABEL: name: extract_vector_elt_v2s16_idx1_i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
    ; CHECK: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
    ; CHECK: $vgpr0 = COPY [[COPY1]](s32)
    %0:_(<2 x s16>) = COPY $vgpr0
    %1:_(s32) = G_CONSTANT i32 1
    %2:_(s16) = G_EXTRACT_VECTOR_ELT %0, %1
    %3:_(s32) = G_ANYEXT %2
    $vgpr0 = COPY %3
...
---
# Constant index 2 is out of bounds for <2 x s16>; the computed shift amount
# folds to 0.
name: extract_vector_elt_v2s16_idx2_i32
body: |
  bb.0:
    liveins: $vgpr0
    ; CHECK-LABEL: name: extract_vector_elt_v2s16_idx2_i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
    ; CHECK: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
    ; CHECK: $vgpr0 = COPY [[COPY1]](s32)
    %0:_(<2 x s16>) = COPY $vgpr0
    %1:_(s32) = G_CONSTANT i32 2
    %2:_(s16) = G_EXTRACT_VECTOR_ELT %0, %1
    %3:_(s32) = G_ANYEXT %2
    $vgpr0 = COPY %3
...
---
# Variable index into <3 x s16>: each element is sign-extended in-register to
# s32 and rebuilt as a <3 x s32>, then extracted with the dynamic index.
name: extract_vector_elt_v3s16_varidx_i32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1_vgpr2, $vgpr3
    ; CHECK-LABEL: name: extract_vector_elt_v3s16_varidx_i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr3
    ; CHECK: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY [[COPY]](<3 x s32>)
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>)
    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV]], 16
    ; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV1]], 16
    ; CHECK: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV2]], 16
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[SEXT_INREG]](s32), [[SEXT_INREG1]](s32), [[SEXT_INREG2]](s32)
    ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[BUILD_VECTOR]](<3 x s32>), [[COPY1]](s32)
    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[EVEC]](s32)
    ; CHECK: $vgpr0 = COPY [[COPY3]](s32)
    %0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
    %1:_(s32) = COPY $vgpr3
    %2:_(<3 x s16>) = G_TRUNC %0
    %3:_(s16) = G_EXTRACT_VECTOR_ELT %2, %1
    %4:_(s32) = G_ANYEXT %3
    $vgpr0 = COPY %4
...
---
# Constant index 0 into <3 x s16>: widened vector is extracted at bit offset 0.
name: extract_vector_elt_v3s16_idx0_i32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1_vgpr2
    ; CHECK-LABEL: name: extract_vector_elt_v3s16_idx0_i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
    ; CHECK: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY [[COPY]](<3 x s32>)
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV]], 16
    ; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV1]], 16
    ; CHECK: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV2]], 16
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[SEXT_INREG]](s32), [[SEXT_INREG1]](s32), [[SEXT_INREG2]](s32)
    ; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[BUILD_VECTOR]](<3 x s32>), 0
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[EXTRACT]](s32)
    ; CHECK: $vgpr0 = COPY [[COPY2]](s32)
    %0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
    %1:_(s32) = G_CONSTANT i32 0
    %2:_(<3 x s16>) = G_TRUNC %0
    %3:_(s16) = G_EXTRACT_VECTOR_ELT %2, %1
    %4:_(s32) = G_ANYEXT %3
    $vgpr0 = COPY %4
...
---
# Constant index 1 into <3 x s16>: widened vector is extracted at bit offset 32.
name: extract_vector_elt_v3s16_idx1_i32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1_vgpr2
    ; CHECK-LABEL: name: extract_vector_elt_v3s16_idx1_i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
    ; CHECK: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY [[COPY]](<3 x s32>)
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV]], 16
    ; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV1]], 16
    ; CHECK: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV2]], 16
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[SEXT_INREG]](s32), [[SEXT_INREG1]](s32), [[SEXT_INREG2]](s32)
    ; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[BUILD_VECTOR]](<3 x s32>), 32
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[EXTRACT]](s32)
    ; CHECK: $vgpr0 = COPY [[COPY2]](s32)
    %0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
    %1:_(s32) = G_CONSTANT i32 1
    %2:_(<3 x s16>) = G_TRUNC %0
    %3:_(s16) = G_EXTRACT_VECTOR_ELT %2, %1
    %4:_(s32) = G_ANYEXT %3
    $vgpr0 = COPY %4
...
---
# Constant index 2 into <3 x s16>: widened vector is extracted at bit offset 64.
name: extract_vector_elt_v3s16_idx2_i32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1_vgpr2
    ; CHECK-LABEL: name: extract_vector_elt_v3s16_idx2_i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
    ; CHECK: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY [[COPY]](<3 x s32>)
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV]], 16
    ; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV1]], 16
    ; CHECK: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UV2]], 16
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[SEXT_INREG]](s32), [[SEXT_INREG1]](s32), [[SEXT_INREG2]](s32)
    ; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[BUILD_VECTOR]](<3 x s32>), 64
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[EXTRACT]](s32)
    ; CHECK: $vgpr0 = COPY [[COPY2]](s32)
    %0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
    %1:_(s32) = G_CONSTANT i32 2
    %2:_(<3 x s16>) = G_TRUNC %0
    %3:_(s16) = G_EXTRACT_VECTOR_ELT %2, %1
    %4:_(s32) = G_ANYEXT %3
    $vgpr0 = COPY %4
...
---
# Constant index 3 is out of bounds for <3 x s16>: the result folds to an
# implicit_def and the source vector is unused.
name: extract_vector_elt_v3s16_idx3_i32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1_vgpr2
    ; CHECK-LABEL: name: extract_vector_elt_v3s16_idx3_i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
    ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
    ; CHECK: $vgpr0 = COPY [[COPY1]](s32)
    %0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
    %1:_(s32) = G_CONSTANT i32 3
    %2:_(<3 x s16>) = G_TRUNC %0
    %3:_(s16) = G_EXTRACT_VECTOR_ELT %2, %1
    %4:_(s32) = G_ANYEXT %3
    $vgpr0 = COPY %4
...
---
# Variable index into <4 x s16>: the containing dword is chosen dynamically
# with idx >> 1, then the halfword with (idx & 1) << 4.
name: extract_vector_elt_v4s16_varidx_i32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1, $vgpr2
    ; CHECK-LABEL: name: extract_vector_elt_v4s16_varidx_i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
    ; CHECK: [[BITCAST:%[0-9]+]]:_(<2 x s32>) = G_BITCAST [[COPY]](<4 x s16>)
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32)
    ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[BITCAST]](<2 x s32>), [[LSHR]](s32)
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND]], [[C1]](s32)
    ; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[EVEC]], [[SHL]](s32)
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
    ; CHECK: $vgpr0 = COPY [[COPY2]](s32)
    %0:_(<4 x s16>) = COPY $vgpr0_vgpr1
    %1:_(s32) = COPY $vgpr2
    %2:_(s16) = G_EXTRACT_VECTOR_ELT %0, %1
    %3:_(s32) = G_ANYEXT %2
    $vgpr0 = COPY %3
...
---
# Variable s128 element extract: bitcast to <4 x s64>, extract the two s64
# halves at indices 2*idx and 2*idx+1, and bitcast them back to an s128.
name: extract_vector_elt_v2s128_varidx_i32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, $vgpr8
    ; CHECK-LABEL: name: extract_vector_elt_v2s128_varidx_i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s128>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr8
    ; CHECK: [[BITCAST:%[0-9]+]]:_(<4 x s64>) = G_BITCAST [[COPY]](<2 x s128>)
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
    ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY1]], [[C]]
    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[MUL]], [[C1]](s32)
    ; CHECK: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[BITCAST]](<4 x s64>), [[ADD]](s32)
    ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; CHECK: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[MUL]], [[C2]]
    ; CHECK: [[EVEC1:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[BITCAST]](<4 x s64>), [[ADD1]](s32)
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[EVEC]](s64), [[EVEC1]](s64)
    ; CHECK: [[BITCAST1:%[0-9]+]]:_(s128) = G_BITCAST [[BUILD_VECTOR]](<2 x s64>)
    ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST1]](s128)
    %0:_(<2 x s128>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
    %1:_(s32) = COPY $vgpr8
    %2:_(s128) = G_EXTRACT_VECTOR_ELT %0, %1
    $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %2
...
---
# The s64 index is truncated to s32 before being used as the extract index.
name: extract_vector_elt_v2i32_varidx_i64
body: |
  bb.0:
    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
    ; CHECK-LABEL: name: extract_vector_elt_v2i32_varidx_i64
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
    ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<2 x s32>), [[TRUNC]](s32)
    ; CHECK: $vgpr0 = COPY [[EVEC]](s32)
    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
    %1:_(s64) = COPY $vgpr2_vgpr3
    %2:_(s32) = G_EXTRACT_VECTOR_ELT %0, %1
    $vgpr0 = COPY %2
...
---
# Constant index 0 into <2 x s64>: folds to a G_EXTRACT at bit offset 0.
name: extract_vector_elt_0_v2i64
body: |
  bb.0:
    liveins: $vgpr0_vgpr1_vgpr2_vgpr3
    ; CHECK-LABEL: name: extract_vector_elt_0_v2i64
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
    ; CHECK: [[EXTRACT:%[0-9]+]]:_(s64) = G_EXTRACT [[COPY]](<2 x s64>), 0
    ; CHECK: $vgpr0_vgpr1 = COPY [[EXTRACT]](s64)
    %0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
    %1:_(s32) = G_CONSTANT i32 0
    %2:_(s64) = G_EXTRACT_VECTOR_ELT %0, %1
    $vgpr0_vgpr1 = COPY %2
...
---
# Constant index 0 into an implicit_def <8 x s64>.
name: extract_vector_elt_0_v8i64
body: |
  bb.0:
    liveins: $vgpr0_vgpr1_vgpr2_vgpr3
    ; CHECK-LABEL: name: extract_vector_elt_0_v8i64
    ; CHECK: [[DEF:%[0-9]+]]:_(<8 x s64>) = G_IMPLICIT_DEF
    ; CHECK: [[EXTRACT:%[0-9]+]]:_(s64) = G_EXTRACT [[DEF]](<8 x s64>), 0
    ; CHECK: $vgpr0_vgpr1 = COPY [[EXTRACT]](s64)
    %0:_(<8 x s64>) = G_IMPLICIT_DEF
    %1:_(s32) = G_CONSTANT i32 0
    %2:_(s64) = G_EXTRACT_VECTOR_ELT %0, %1
    $vgpr0_vgpr1 = COPY %2
...
---
# Constant index 0 into an implicit_def <16 x s64>.
name: extract_vector_elt_0_v16i64
body: |
  bb.0:
    liveins: $vgpr0_vgpr1_vgpr2_vgpr3
    ; CHECK-LABEL: name: extract_vector_elt_0_v16i64
    ; CHECK: [[DEF:%[0-9]+]]:_(<16 x s64>) = G_IMPLICIT_DEF
    ; CHECK: [[EXTRACT:%[0-9]+]]:_(s64) = G_EXTRACT [[DEF]](<16 x s64>), 0
    ; CHECK: $vgpr0_vgpr1 = COPY [[EXTRACT]](s64)
    %0:_(<16 x s64>) = G_IMPLICIT_DEF
    %1:_(s32) = G_CONSTANT i32 0
    %2:_(s64) = G_EXTRACT_VECTOR_ELT %0, %1
    $vgpr0_vgpr1 = COPY %2
...
# Make sure we look through casts looking for a constant index.
---
# The G_TRUNC feeding the index is looked through to the i64 constant, so the
# extract still folds to a G_EXTRACT at offset 0.
name: extract_vector_elt_look_through_trunc_0_v4i32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1_vgpr2_vgpr3
    ; CHECK-LABEL: name: extract_vector_elt_look_through_trunc_0_v4i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
    ; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY]](<4 x s32>), 0
    ; CHECK: $vgpr0 = COPY [[EXTRACT]](s32)
    %0:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
    %1:_(s64) = G_CONSTANT i64 0
    %2:_(s32) = G_TRUNC %1
    %3:_(s32) = G_EXTRACT_VECTOR_ELT %0, %2
    $vgpr0 = COPY %3
...
---
# The <64 x s32> load is split into four <16 x s32> pieces; index 7 lands in
# the first piece and is extracted at bit offset 224.
name: extract_vector_elt_7_v64s32
body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: extract_vector_elt_7_v64s32
    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
    ; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load 64, align 4, addrspace 4)
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
    ; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 64 + 64, align 4, addrspace 4)
    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
    ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
    ; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load 64 + 128, align 4, addrspace 4)
    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 192
    ; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
    ; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load 64 + 192, align 4, addrspace 4)
    ; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[LOAD]](<16 x s32>), 224
    ; CHECK: S_ENDPGM 0, implicit [[EXTRACT]](s32)
    %0:_(p1) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_CONSTANT i32 7
    %2:_(<64 x s32>) = G_LOAD %0 :: (load 256, align 4, addrspace 4)
    %3:_(s32) = G_EXTRACT_VECTOR_ELT %2, %1
    S_ENDPGM 0, implicit %3
...
---
# Index 33 lands in the third <16 x s32> piece (elements 32-47) and is
# extracted at bit offset 32.
name: extract_vector_elt_33_v64s32
body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: extract_vector_elt_33_v64s32
    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
    ; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load 64, align 4, addrspace 4)
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
    ; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 64 + 64, align 4, addrspace 4)
    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
    ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
    ; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load 64 + 128, align 4, addrspace 4)
    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 192
    ; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
    ; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load 64 + 192, align 4, addrspace 4)
    ; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[LOAD2]](<16 x s32>), 32
    ; CHECK: S_ENDPGM 0, implicit [[EXTRACT]](s32)
    %0:_(p1) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_CONSTANT i32 33
    %2:_(<64 x s32>) = G_LOAD %0 :: (load 256, align 4, addrspace 4)
    %3:_(s32) = G_EXTRACT_VECTOR_ELT %2, %1
    S_ENDPGM 0, implicit %3
...
# Test handling of out of bounds indexes
---
# Indices 64 and 65 are both out of bounds for <64 x s32>: each result folds
# to an implicit_def and the load is eliminated entirely.
name: extract_vector_elt_64_65_v64s32
body: |
  bb.0:
    liveins: $sgpr0_sgpr1
    ; CHECK-LABEL: name: extract_vector_elt_64_65_v64s32
    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
    ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
    ; CHECK: S_ENDPGM 0, implicit [[COPY1]](s32), implicit [[DEF]](s32)
    %0:_(p1) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_CONSTANT i32 64
    %2:_(<64 x s32>) = G_LOAD %0 :: (load 256, align 4, addrspace 4)
    %3:_(s32) = G_EXTRACT_VECTOR_ELT %2, %1
    %4:_(s32) = G_CONSTANT i32 65
    %5:_(s32) = G_EXTRACT_VECTOR_ELT %2, %4
    S_ENDPGM 0, implicit %3, implicit %5
...
---
name: extract_vector_elt_33_v64p3
body: |
bb.0:
liveins: $sgpr0_sgpr1
; CHECK-LABEL: name: extract_vector_elt_33_v64p3
; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load 64, align 4, addrspace 4)
; CHECK: [[BITCAST:%[0-9]+]]:_(<16 x p3>) = G_BITCAST [[LOAD]](<16 x s32>)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 64 + 64, align 4, addrspace 4)
; CHECK: [[BITCAST1:%[0-9]+]]:_(<16 x p3>) = G_BITCAST [[LOAD1]](<16 x s32>)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load 64 + 128, align 4, addrspace 4)
; CHECK: [[BITCAST2:%[0-9]+]]:_(<16 x p3>) = G_BITCAST [[LOAD2]](<16 x s32>)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 192
; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load 64 + 192, align 4, addrspace 4)
; CHECK: [[BITCAST3:%[0-9]+]]:_(<16 x p3>) = G_BITCAST [[LOAD3]](<16 x s32>)
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0
; CHECK: [[UV:%[0-9]+]]:_(p3), [[UV1:%[0-9]+]]:_(p3), [[UV2:%[0-9]+]]:_(p3), [[UV3:%[0-9]+]]:_(p3), [[UV4:%[0-9]+]]:_(p3), [[UV5:%[0-9]+]]:_(p3), [[UV6:%[0-9]+]]:_(p3), [[UV7:%[0-9]+]]:_(p3), [[UV8:%[0-9]+]]:_(p3), [[UV9:%[0-9]+]]:_(p3), [[UV10:%[0-9]+]]:_(p3), [[UV11:%[0-9]+]]:_(p3), [[UV12:%[0-9]+]]:_(p3), [[UV13:%[0-9]+]]:_(p3), [[UV14:%[0-9]+]]:_(p3), [[UV15:%[0-9]+]]:_(p3) = G_UNMERGE_VALUES [[BITCAST]](<16 x p3>)
; CHECK: [[UV16:%[0-9]+]]:_(p3), [[UV17:%[0-9]+]]:_(p3), [[UV18:%[0-9]+]]:_(p3), [[UV19:%[0-9]+]]:_(p3), [[UV20:%[0-9]+]]:_(p3), [[UV21:%[0-9]+]]:_(p3), [[UV22:%[0-9]+]]:_(p3), [[UV23:%[0-9]+]]:_(p3), [[UV24:%[0-9]+]]:_(p3), [[UV25:%[0-9]+]]:_(p3), [[UV26:%[0-9]+]]:_(p3), [[UV27:%[0-9]+]]:_(p3), [[UV28:%[0-9]+]]:_(p3), [[UV29:%[0-9]+]]:_(p3), [[UV30:%[0-9]+]]:_(p3), [[UV31:%[0-9]+]]:_(p3) = G_UNMERGE_VALUES [[BITCAST1]](<16 x p3>)
; CHECK: [[UV32:%[0-9]+]]:_(p3), [[UV33:%[0-9]+]]:_(p3), [[UV34:%[0-9]+]]:_(p3), [[UV35:%[0-9]+]]:_(p3), [[UV36:%[0-9]+]]:_(p3), [[UV37:%[0-9]+]]:_(p3), [[UV38:%[0-9]+]]:_(p3), [[UV39:%[0-9]+]]:_(p3), [[UV40:%[0-9]+]]:_(p3), [[UV41:%[0-9]+]]:_(p3), [[UV42:%[0-9]+]]:_(p3), [[UV43:%[0-9]+]]:_(p3), [[UV44:%[0-9]+]]:_(p3), [[UV45:%[0-9]+]]:_(p3), [[UV46:%[0-9]+]]:_(p3), [[UV47:%[0-9]+]]:_(p3) = G_UNMERGE_VALUES [[BITCAST2]](<16 x p3>)
; CHECK: [[UV48:%[0-9]+]]:_(p3), [[UV49:%[0-9]+]]:_(p3), [[UV50:%[0-9]+]]:_(p3), [[UV51:%[0-9]+]]:_(p3), [[UV52:%[0-9]+]]:_(p3), [[UV53:%[0-9]+]]:_(p3), [[UV54:%[0-9]+]]:_(p3), [[UV55:%[0-9]+]]:_(p3), [[UV56:%[0-9]+]]:_(p3), [[UV57:%[0-9]+]]:_(p3), [[UV58:%[0-9]+]]:_(p3), [[UV59:%[0-9]+]]:_(p3), [[UV60:%[0-9]+]]:_(p3), [[UV61:%[0-9]+]]:_(p3), [[UV62:%[0-9]+]]:_(p3), [[UV63:%[0-9]+]]:_(p3) = G_UNMERGE_VALUES [[BITCAST3]](<16 x p3>)
; CHECK: G_STORE [[UV]](p3), [[FRAME_INDEX]](p5) :: (store 4 into %stack.0, align 256, addrspace 5)
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C3]](s32)
; CHECK: G_STORE [[UV1]](p3), [[PTR_ADD3]](p5) :: (store 4 into %stack.0 + 4, align 256, addrspace 5)
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C4]](s32)
; CHECK: G_STORE [[UV2]](p3), [[PTR_ADD4]](p5) :: (store 4 into %stack.0 + 8, align 256, addrspace 5)
; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; CHECK: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C5]](s32)
; CHECK: G_STORE [[UV3]](p3), [[PTR_ADD5]](p5) :: (store 4 into %stack.0 + 12, align 256, addrspace 5)
; CHECK: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C6]](s32)
; CHECK: G_STORE [[UV4]](p3), [[PTR_ADD6]](p5) :: (store 4 into %stack.0 + 16, align 256, addrspace 5)
; CHECK: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
; CHECK: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C7]](s32)
; CHECK: G_STORE [[UV5]](p3), [[PTR_ADD7]](p5) :: (store 4 into %stack.0 + 20, align 256, addrspace 5)
; CHECK: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C8]](s32)
; CHECK: G_STORE [[UV6]](p3), [[PTR_ADD8]](p5) :: (store 4 into %stack.0 + 24, align 256, addrspace 5)
; CHECK: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
; CHECK: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C9]](s32)
; CHECK: G_STORE [[UV7]](p3), [[PTR_ADD9]](p5) :: (store 4 into %stack.0 + 28, align 256, addrspace 5)
; CHECK: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; CHECK: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C10]](s32)
; CHECK: G_STORE [[UV8]](p3), [[PTR_ADD10]](p5) :: (store 4 into %stack.0 + 32, align 256, addrspace 5)
; CHECK: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 36
; CHECK: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C11]](s32)
; CHECK: G_STORE [[UV9]](p3), [[PTR_ADD11]](p5) :: (store 4 into %stack.0 + 36, align 256, addrspace 5)
; CHECK: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
; CHECK: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C12]](s32)
; CHECK: G_STORE [[UV10]](p3), [[PTR_ADD12]](p5) :: (store 4 into %stack.0 + 40, align 256, addrspace 5)
; CHECK: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 44
; CHECK: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C13]](s32)
; CHECK: G_STORE [[UV11]](p3), [[PTR_ADD13]](p5) :: (store 4 into %stack.0 + 44, align 256, addrspace 5)
; CHECK: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
; CHECK: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C14]](s32)
; CHECK: G_STORE [[UV12]](p3), [[PTR_ADD14]](p5) :: (store 4 into %stack.0 + 48, align 256, addrspace 5)
; CHECK: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 52
; CHECK: [[PTR_ADD15:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C15]](s32)
; CHECK: G_STORE [[UV13]](p3), [[PTR_ADD15]](p5) :: (store 4 into %stack.0 + 52, align 256, addrspace 5)
; CHECK: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
; CHECK: [[PTR_ADD16:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C16]](s32)
; CHECK: G_STORE [[UV14]](p3), [[PTR_ADD16]](p5) :: (store 4 into %stack.0 + 56, align 256, addrspace 5)
; CHECK: [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 60
; CHECK: [[PTR_ADD17:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C17]](s32)
; CHECK: G_STORE [[UV15]](p3), [[PTR_ADD17]](p5) :: (store 4 into %stack.0 + 60, align 256, addrspace 5)
; CHECK: [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; CHECK: [[PTR_ADD18:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C18]](s32)
; CHECK: G_STORE [[UV16]](p3), [[PTR_ADD18]](p5) :: (store 4 into %stack.0 + 64, align 256, addrspace 5)
; CHECK: [[C19:%[0-9]+]]:_(s32) = G_CONSTANT i32 68
; CHECK: [[PTR_ADD19:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C19]](s32)
; CHECK: G_STORE [[UV17]](p3), [[PTR_ADD19]](p5) :: (store 4 into %stack.0 + 68, align 256, addrspace 5)
; CHECK: [[C20:%[0-9]+]]:_(s32) = G_CONSTANT i32 72
; CHECK: [[PTR_ADD20:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C20]](s32)
; CHECK: G_STORE [[UV18]](p3), [[PTR_ADD20]](p5) :: (store 4 into %stack.0 + 72, align 256, addrspace 5)
; CHECK: [[C21:%[0-9]+]]:_(s32) = G_CONSTANT i32 76
; CHECK: [[PTR_ADD21:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C21]](s32)
; CHECK: G_STORE [[UV19]](p3), [[PTR_ADD21]](p5) :: (store 4 into %stack.0 + 76, align 256, addrspace 5)
; CHECK: [[C22:%[0-9]+]]:_(s32) = G_CONSTANT i32 80
; CHECK: [[PTR_ADD22:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C22]](s32)
; CHECK: G_STORE [[UV20]](p3), [[PTR_ADD22]](p5) :: (store 4 into %stack.0 + 80, align 256, addrspace 5)
; CHECK: [[C23:%[0-9]+]]:_(s32) = G_CONSTANT i32 84
; CHECK: [[PTR_ADD23:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C23]](s32)
; CHECK: G_STORE [[UV21]](p3), [[PTR_ADD23]](p5) :: (store 4 into %stack.0 + 84, align 256, addrspace 5)
; CHECK: [[C24:%[0-9]+]]:_(s32) = G_CONSTANT i32 88
; CHECK: [[PTR_ADD24:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C24]](s32)
; CHECK: G_STORE [[UV22]](p3), [[PTR_ADD24]](p5) :: (store 4 into %stack.0 + 88, align 256, addrspace 5)
; CHECK: [[C25:%[0-9]+]]:_(s32) = G_CONSTANT i32 92
; CHECK: [[PTR_ADD25:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C25]](s32)
; CHECK: G_STORE [[UV23]](p3), [[PTR_ADD25]](p5) :: (store 4 into %stack.0 + 92, align 256, addrspace 5)
; CHECK: [[C26:%[0-9]+]]:_(s32) = G_CONSTANT i32 96
; CHECK: [[PTR_ADD26:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C26]](s32)
; CHECK: G_STORE [[UV24]](p3), [[PTR_ADD26]](p5) :: (store 4 into %stack.0 + 96, align 256, addrspace 5)
; CHECK: [[C27:%[0-9]+]]:_(s32) = G_CONSTANT i32 100
; CHECK: [[PTR_ADD27:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C27]](s32)
; CHECK: G_STORE [[UV25]](p3), [[PTR_ADD27]](p5) :: (store 4 into %stack.0 + 100, align 256, addrspace 5)
; CHECK: [[C28:%[0-9]+]]:_(s32) = G_CONSTANT i32 104
; CHECK: [[PTR_ADD28:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C28]](s32)
; CHECK: G_STORE [[UV26]](p3), [[PTR_ADD28]](p5) :: (store 4 into %stack.0 + 104, align 256, addrspace 5)
; CHECK: [[C29:%[0-9]+]]:_(s32) = G_CONSTANT i32 108
; CHECK: [[PTR_ADD29:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C29]](s32)
; CHECK: G_STORE [[UV27]](p3), [[PTR_ADD29]](p5) :: (store 4 into %stack.0 + 108, align 256, addrspace 5)
; CHECK: [[C30:%[0-9]+]]:_(s32) = G_CONSTANT i32 112
; CHECK: [[PTR_ADD30:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C30]](s32)
; CHECK: G_STORE [[UV28]](p3), [[PTR_ADD30]](p5) :: (store 4 into %stack.0 + 112, align 256, addrspace 5)
; CHECK: [[C31:%[0-9]+]]:_(s32) = G_CONSTANT i32 116
; CHECK: [[PTR_ADD31:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C31]](s32)
; CHECK: G_STORE [[UV29]](p3), [[PTR_ADD31]](p5) :: (store 4 into %stack.0 + 116, align 256, addrspace 5)
; CHECK: [[C32:%[0-9]+]]:_(s32) = G_CONSTANT i32 120
; CHECK: [[PTR_ADD32:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C32]](s32)
; CHECK: G_STORE [[UV30]](p3), [[PTR_ADD32]](p5) :: (store 4 into %stack.0 + 120, align 256, addrspace 5)
; CHECK: [[C33:%[0-9]+]]:_(s32) = G_CONSTANT i32 124
; CHECK: [[PTR_ADD33:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C33]](s32)
; CHECK: G_STORE [[UV31]](p3), [[PTR_ADD33]](p5) :: (store 4 into %stack.0 + 124, align 256, addrspace 5)
; CHECK: [[C34:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
; CHECK: [[PTR_ADD34:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C34]](s32)
; CHECK: G_STORE [[UV32]](p3), [[PTR_ADD34]](p5) :: (store 4 into %stack.0 + 128, align 256, addrspace 5)
; CHECK: [[C35:%[0-9]+]]:_(s32) = G_CONSTANT i32 132
; CHECK: [[PTR_ADD35:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C35]](s32)
; CHECK: [[COPY1:%[0-9]+]]:_(p5) = COPY [[PTR_ADD35]](p5)
; CHECK: G_STORE [[UV33]](p3), [[COPY1]](p5) :: (store 4 into %stack.0 + 132, align 256, addrspace 5)
; CHECK: [[C36:%[0-9]+]]:_(s32) = G_CONSTANT i32 136
; CHECK: [[PTR_ADD36:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C36]](s32)
; CHECK: G_STORE [[UV34]](p3), [[PTR_ADD36]](p5) :: (store 4 into %stack.0 + 136, align 256, addrspace 5)
; CHECK: [[C37:%[0-9]+]]:_(s32) = G_CONSTANT i32 140
; CHECK: [[PTR_ADD37:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C37]](s32)
; CHECK: G_STORE [[UV35]](p3), [[PTR_ADD37]](p5) :: (store 4 into %stack.0 + 140, align 256, addrspace 5)
; CHECK: [[C38:%[0-9]+]]:_(s32) = G_CONSTANT i32 144
; CHECK: [[PTR_ADD38:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C38]](s32)
; CHECK: G_STORE [[UV36]](p3), [[PTR_ADD38]](p5) :: (store 4 into %stack.0 + 144, align 256, addrspace 5)
; CHECK: [[C39:%[0-9]+]]:_(s32) = G_CONSTANT i32 148
; CHECK: [[PTR_ADD39:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C39]](s32)
; CHECK: G_STORE [[UV37]](p3), [[PTR_ADD39]](p5) :: (store 4 into %stack.0 + 148, align 256, addrspace 5)
; CHECK: [[C40:%[0-9]+]]:_(s32) = G_CONSTANT i32 152
; CHECK: [[PTR_ADD40:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C40]](s32)
; CHECK: G_STORE [[UV38]](p3), [[PTR_ADD40]](p5) :: (store 4 into %stack.0 + 152, align 256, addrspace 5)
; CHECK: [[C41:%[0-9]+]]:_(s32) = G_CONSTANT i32 156
; CHECK: [[PTR_ADD41:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C41]](s32)
; CHECK: G_STORE [[UV39]](p3), [[PTR_ADD41]](p5) :: (store 4 into %stack.0 + 156, align 256, addrspace 5)
; CHECK: [[C42:%[0-9]+]]:_(s32) = G_CONSTANT i32 160
; CHECK: [[PTR_ADD42:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C42]](s32)
; CHECK: G_STORE [[UV40]](p3), [[PTR_ADD42]](p5) :: (store 4 into %stack.0 + 160, align 256, addrspace 5)
; CHECK: [[C43:%[0-9]+]]:_(s32) = G_CONSTANT i32 164
; CHECK: [[PTR_ADD43:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C43]](s32)
; CHECK: G_STORE [[UV41]](p3), [[PTR_ADD43]](p5) :: (store 4 into %stack.0 + 164, align 256, addrspace 5)
; CHECK: [[C44:%[0-9]+]]:_(s32) = G_CONSTANT i32 168
; CHECK: [[PTR_ADD44:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C44]](s32)
; CHECK: G_STORE [[UV42]](p3), [[PTR_ADD44]](p5) :: (store 4 into %stack.0 + 168, align 256, addrspace 5)
; CHECK: [[C45:%[0-9]+]]:_(s32) = G_CONSTANT i32 172
; CHECK: [[PTR_ADD45:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C45]](s32)
; CHECK: G_STORE [[UV43]](p3), [[PTR_ADD45]](p5) :: (store 4 into %stack.0 + 172, align 256, addrspace 5)
; CHECK: [[C46:%[0-9]+]]:_(s32) = G_CONSTANT i32 176
; CHECK: [[PTR_ADD46:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C46]](s32)
; CHECK: G_STORE [[UV44]](p3), [[PTR_ADD46]](p5) :: (store 4 into %stack.0 + 176, align 256, addrspace 5)
; CHECK: [[C47:%[0-9]+]]:_(s32) = G_CONSTANT i32 180
; CHECK: [[PTR_ADD47:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C47]](s32)
; CHECK: G_STORE [[UV45]](p3), [[PTR_ADD47]](p5) :: (store 4 into %stack.0 + 180, align 256, addrspace 5)
; CHECK: [[C48:%[0-9]+]]:_(s32) = G_CONSTANT i32 184
; CHECK: [[PTR_ADD48:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C48]](s32)
; CHECK: G_STORE [[UV46]](p3), [[PTR_ADD48]](p5) :: (store 4 into %stack.0 + 184, align 256, addrspace 5)
; CHECK: [[C49:%[0-9]+]]:_(s32) = G_CONSTANT i32 188
; CHECK: [[PTR_ADD49:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C49]](s32)
; CHECK: G_STORE [[UV47]](p3), [[PTR_ADD49]](p5) :: (store 4 into %stack.0 + 188, align 256, addrspace 5)
; CHECK: [[C50:%[0-9]+]]:_(s32) = G_CONSTANT i32 192
; CHECK: [[PTR_ADD50:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C50]](s32)
; CHECK: G_STORE [[UV48]](p3), [[PTR_ADD50]](p5) :: (store 4 into %stack.0 + 192, align 256, addrspace 5)
; CHECK: [[C51:%[0-9]+]]:_(s32) = G_CONSTANT i32 196
; CHECK: [[PTR_ADD51:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C51]](s32)
; CHECK: G_STORE [[UV49]](p3), [[PTR_ADD51]](p5) :: (store 4 into %stack.0 + 196, align 256, addrspace 5)
; CHECK: [[C52:%[0-9]+]]:_(s32) = G_CONSTANT i32 200
; CHECK: [[PTR_ADD52:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C52]](s32)
; CHECK: G_STORE [[UV50]](p3), [[PTR_ADD52]](p5) :: (store 4 into %stack.0 + 200, align 256, addrspace 5)
; CHECK: [[C53:%[0-9]+]]:_(s32) = G_CONSTANT i32 204
; CHECK: [[PTR_ADD53:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C53]](s32)
; CHECK: G_STORE [[UV51]](p3), [[PTR_ADD53]](p5) :: (store 4 into %stack.0 + 204, align 256, addrspace 5)
; CHECK: [[C54:%[0-9]+]]:_(s32) = G_CONSTANT i32 208
; CHECK: [[PTR_ADD54:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C54]](s32)
; CHECK: G_STORE [[UV52]](p3), [[PTR_ADD54]](p5) :: (store 4 into %stack.0 + 208, align 256, addrspace 5)
; CHECK: [[C55:%[0-9]+]]:_(s32) = G_CONSTANT i32 212
; CHECK: [[PTR_ADD55:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C55]](s32)
; CHECK: G_STORE [[UV53]](p3), [[PTR_ADD55]](p5) :: (store 4 into %stack.0 + 212, align 256, addrspace 5)
; CHECK: [[C56:%[0-9]+]]:_(s32) = G_CONSTANT i32 216
; CHECK: [[PTR_ADD56:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C56]](s32)
; CHECK: G_STORE [[UV54]](p3), [[PTR_ADD56]](p5) :: (store 4 into %stack.0 + 216, align 256, addrspace 5)
; CHECK: [[C57:%[0-9]+]]:_(s32) = G_CONSTANT i32 220
; CHECK: [[PTR_ADD57:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C57]](s32)
; CHECK: G_STORE [[UV55]](p3), [[PTR_ADD57]](p5) :: (store 4 into %stack.0 + 220, align 256, addrspace 5)
; CHECK: [[C58:%[0-9]+]]:_(s32) = G_CONSTANT i32 224
; CHECK: [[PTR_ADD58:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C58]](s32)
; CHECK: G_STORE [[UV56]](p3), [[PTR_ADD58]](p5) :: (store 4 into %stack.0 + 224, align 256, addrspace 5)
; CHECK: [[C59:%[0-9]+]]:_(s32) = G_CONSTANT i32 228
; CHECK: [[PTR_ADD59:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C59]](s32)
; CHECK: G_STORE [[UV57]](p3), [[PTR_ADD59]](p5) :: (store 4 into %stack.0 + 228, align 256, addrspace 5)
; CHECK: [[C60:%[0-9]+]]:_(s32) = G_CONSTANT i32 232
; CHECK: [[PTR_ADD60:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C60]](s32)
; CHECK: G_STORE [[UV58]](p3), [[PTR_ADD60]](p5) :: (store 4 into %stack.0 + 232, align 256, addrspace 5)
; CHECK: [[C61:%[0-9]+]]:_(s32) = G_CONSTANT i32 236
; CHECK: [[PTR_ADD61:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C61]](s32)
; CHECK: G_STORE [[UV59]](p3), [[PTR_ADD61]](p5) :: (store 4 into %stack.0 + 236, align 256, addrspace 5)
; CHECK: [[C62:%[0-9]+]]:_(s32) = G_CONSTANT i32 240
; CHECK: [[PTR_ADD62:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C62]](s32)
; CHECK: G_STORE [[UV60]](p3), [[PTR_ADD62]](p5) :: (store 4 into %stack.0 + 240, align 256, addrspace 5)
; CHECK: [[C63:%[0-9]+]]:_(s32) = G_CONSTANT i32 244
; CHECK: [[PTR_ADD63:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C63]](s32)
; CHECK: G_STORE [[UV61]](p3), [[PTR_ADD63]](p5) :: (store 4 into %stack.0 + 244, align 256, addrspace 5)
; CHECK: [[C64:%[0-9]+]]:_(s32) = G_CONSTANT i32 248
; CHECK: [[PTR_ADD64:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C64]](s32)
; CHECK: G_STORE [[UV62]](p3), [[PTR_ADD64]](p5) :: (store 4 into %stack.0 + 248, align 256, addrspace 5)
; CHECK: [[C65:%[0-9]+]]:_(s32) = G_CONSTANT i32 252
; CHECK: [[PTR_ADD65:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C65]](s32)
; CHECK: G_STORE [[UV63]](p3), [[PTR_ADD65]](p5) :: (store 4 into %stack.0 + 252, align 256, addrspace 5)
; CHECK: [[LOAD4:%[0-9]+]]:_(p3) = G_LOAD [[PTR_ADD35]](p5) :: (load 4 from %stack.0 + 132, addrspace 5)
; CHECK: S_ENDPGM 0, implicit [[LOAD4]](p3)
%0:_(p1) = COPY $sgpr0_sgpr1
%1:_(s32) = G_CONSTANT i32 33
%2:_(<64 x p3>) = G_LOAD %0 :: (load 256, align 4, addrspace 4)
%3:_(p3) = G_EXTRACT_VECTOR_ELT %2, %1
S_ENDPGM 0, implicit %3
...
---
# Test legalization of G_EXTRACT_VECTOR_ELT with a variable (runtime) index into
# a <64 x s32> vector. The expected lowering: the 256-byte vector load is split
# into four <16 x s32> loads, each piece is unmerged into scalar s32 values that
# are spilled one by one into a 256-byte-aligned stack slot (%stack.0), and the
# requested element is then re-loaded through a G_PTR_ADD of (index & 63) * 4
# from the frame index. The mask to 63 clamps the index to the vector bounds.
name: extract_vector_elt_varidx_v64s32
body: |
bb.0:
liveins: $sgpr0_sgpr1, $sgpr2
; CHECK-LABEL: name: extract_vector_elt_varidx_v64s32
; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load 64, align 4, addrspace 4)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 64 + 64, align 4, addrspace 4)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load 64 + 128, align 4, addrspace 4)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 192
; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load 64 + 192, align 4, addrspace 4)
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<16 x s32>)
; CHECK: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<16 x s32>)
; CHECK: [[UV32:%[0-9]+]]:_(s32), [[UV33:%[0-9]+]]:_(s32), [[UV34:%[0-9]+]]:_(s32), [[UV35:%[0-9]+]]:_(s32), [[UV36:%[0-9]+]]:_(s32), [[UV37:%[0-9]+]]:_(s32), [[UV38:%[0-9]+]]:_(s32), [[UV39:%[0-9]+]]:_(s32), [[UV40:%[0-9]+]]:_(s32), [[UV41:%[0-9]+]]:_(s32), [[UV42:%[0-9]+]]:_(s32), [[UV43:%[0-9]+]]:_(s32), [[UV44:%[0-9]+]]:_(s32), [[UV45:%[0-9]+]]:_(s32), [[UV46:%[0-9]+]]:_(s32), [[UV47:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD2]](<16 x s32>)
; CHECK: [[UV48:%[0-9]+]]:_(s32), [[UV49:%[0-9]+]]:_(s32), [[UV50:%[0-9]+]]:_(s32), [[UV51:%[0-9]+]]:_(s32), [[UV52:%[0-9]+]]:_(s32), [[UV53:%[0-9]+]]:_(s32), [[UV54:%[0-9]+]]:_(s32), [[UV55:%[0-9]+]]:_(s32), [[UV56:%[0-9]+]]:_(s32), [[UV57:%[0-9]+]]:_(s32), [[UV58:%[0-9]+]]:_(s32), [[UV59:%[0-9]+]]:_(s32), [[UV60:%[0-9]+]]:_(s32), [[UV61:%[0-9]+]]:_(s32), [[UV62:%[0-9]+]]:_(s32), [[UV63:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD3]](<16 x s32>)
; CHECK: G_STORE [[UV]](s32), [[FRAME_INDEX]](p5) :: (store 4 into %stack.0, align 256, addrspace 5)
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C3]](s32)
; CHECK: G_STORE [[UV1]](s32), [[PTR_ADD3]](p5) :: (store 4 into %stack.0 + 4, align 256, addrspace 5)
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C4]](s32)
; CHECK: G_STORE [[UV2]](s32), [[PTR_ADD4]](p5) :: (store 4 into %stack.0 + 8, align 256, addrspace 5)
; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; CHECK: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C5]](s32)
; CHECK: G_STORE [[UV3]](s32), [[PTR_ADD5]](p5) :: (store 4 into %stack.0 + 12, align 256, addrspace 5)
; CHECK: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C6]](s32)
; CHECK: G_STORE [[UV4]](s32), [[PTR_ADD6]](p5) :: (store 4 into %stack.0 + 16, align 256, addrspace 5)
; CHECK: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
; CHECK: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C7]](s32)
; CHECK: G_STORE [[UV5]](s32), [[PTR_ADD7]](p5) :: (store 4 into %stack.0 + 20, align 256, addrspace 5)
; CHECK: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C8]](s32)
; CHECK: G_STORE [[UV6]](s32), [[PTR_ADD8]](p5) :: (store 4 into %stack.0 + 24, align 256, addrspace 5)
; CHECK: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
; CHECK: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C9]](s32)
; CHECK: G_STORE [[UV7]](s32), [[PTR_ADD9]](p5) :: (store 4 into %stack.0 + 28, align 256, addrspace 5)
; CHECK: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; CHECK: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C10]](s32)
; CHECK: G_STORE [[UV8]](s32), [[PTR_ADD10]](p5) :: (store 4 into %stack.0 + 32, align 256, addrspace 5)
; CHECK: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 36
; CHECK: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C11]](s32)
; CHECK: G_STORE [[UV9]](s32), [[PTR_ADD11]](p5) :: (store 4 into %stack.0 + 36, align 256, addrspace 5)
; CHECK: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
; CHECK: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C12]](s32)
; CHECK: G_STORE [[UV10]](s32), [[PTR_ADD12]](p5) :: (store 4 into %stack.0 + 40, align 256, addrspace 5)
; CHECK: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 44
; CHECK: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C13]](s32)
; CHECK: G_STORE [[UV11]](s32), [[PTR_ADD13]](p5) :: (store 4 into %stack.0 + 44, align 256, addrspace 5)
; CHECK: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
; CHECK: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C14]](s32)
; CHECK: G_STORE [[UV12]](s32), [[PTR_ADD14]](p5) :: (store 4 into %stack.0 + 48, align 256, addrspace 5)
; CHECK: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 52
; CHECK: [[PTR_ADD15:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C15]](s32)
; CHECK: G_STORE [[UV13]](s32), [[PTR_ADD15]](p5) :: (store 4 into %stack.0 + 52, align 256, addrspace 5)
; CHECK: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
; CHECK: [[PTR_ADD16:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C16]](s32)
; CHECK: G_STORE [[UV14]](s32), [[PTR_ADD16]](p5) :: (store 4 into %stack.0 + 56, align 256, addrspace 5)
; CHECK: [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 60
; CHECK: [[PTR_ADD17:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C17]](s32)
; CHECK: G_STORE [[UV15]](s32), [[PTR_ADD17]](p5) :: (store 4 into %stack.0 + 60, align 256, addrspace 5)
; CHECK: [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; CHECK: [[PTR_ADD18:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C18]](s32)
; CHECK: G_STORE [[UV16]](s32), [[PTR_ADD18]](p5) :: (store 4 into %stack.0 + 64, align 256, addrspace 5)
; CHECK: [[C19:%[0-9]+]]:_(s32) = G_CONSTANT i32 68
; CHECK: [[PTR_ADD19:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C19]](s32)
; CHECK: G_STORE [[UV17]](s32), [[PTR_ADD19]](p5) :: (store 4 into %stack.0 + 68, align 256, addrspace 5)
; CHECK: [[C20:%[0-9]+]]:_(s32) = G_CONSTANT i32 72
; CHECK: [[PTR_ADD20:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C20]](s32)
; CHECK: G_STORE [[UV18]](s32), [[PTR_ADD20]](p5) :: (store 4 into %stack.0 + 72, align 256, addrspace 5)
; CHECK: [[C21:%[0-9]+]]:_(s32) = G_CONSTANT i32 76
; CHECK: [[PTR_ADD21:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C21]](s32)
; CHECK: G_STORE [[UV19]](s32), [[PTR_ADD21]](p5) :: (store 4 into %stack.0 + 76, align 256, addrspace 5)
; CHECK: [[C22:%[0-9]+]]:_(s32) = G_CONSTANT i32 80
; CHECK: [[PTR_ADD22:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C22]](s32)
; CHECK: G_STORE [[UV20]](s32), [[PTR_ADD22]](p5) :: (store 4 into %stack.0 + 80, align 256, addrspace 5)
; CHECK: [[C23:%[0-9]+]]:_(s32) = G_CONSTANT i32 84
; CHECK: [[PTR_ADD23:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C23]](s32)
; CHECK: G_STORE [[UV21]](s32), [[PTR_ADD23]](p5) :: (store 4 into %stack.0 + 84, align 256, addrspace 5)
; CHECK: [[C24:%[0-9]+]]:_(s32) = G_CONSTANT i32 88
; CHECK: [[PTR_ADD24:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C24]](s32)
; CHECK: G_STORE [[UV22]](s32), [[PTR_ADD24]](p5) :: (store 4 into %stack.0 + 88, align 256, addrspace 5)
; CHECK: [[C25:%[0-9]+]]:_(s32) = G_CONSTANT i32 92
; CHECK: [[PTR_ADD25:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C25]](s32)
; CHECK: G_STORE [[UV23]](s32), [[PTR_ADD25]](p5) :: (store 4 into %stack.0 + 92, align 256, addrspace 5)
; CHECK: [[C26:%[0-9]+]]:_(s32) = G_CONSTANT i32 96
; CHECK: [[PTR_ADD26:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C26]](s32)
; CHECK: G_STORE [[UV24]](s32), [[PTR_ADD26]](p5) :: (store 4 into %stack.0 + 96, align 256, addrspace 5)
; CHECK: [[C27:%[0-9]+]]:_(s32) = G_CONSTANT i32 100
; CHECK: [[PTR_ADD27:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C27]](s32)
; CHECK: G_STORE [[UV25]](s32), [[PTR_ADD27]](p5) :: (store 4 into %stack.0 + 100, align 256, addrspace 5)
; CHECK: [[C28:%[0-9]+]]:_(s32) = G_CONSTANT i32 104
; CHECK: [[PTR_ADD28:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C28]](s32)
; CHECK: G_STORE [[UV26]](s32), [[PTR_ADD28]](p5) :: (store 4 into %stack.0 + 104, align 256, addrspace 5)
; CHECK: [[C29:%[0-9]+]]:_(s32) = G_CONSTANT i32 108
; CHECK: [[PTR_ADD29:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C29]](s32)
; CHECK: G_STORE [[UV27]](s32), [[PTR_ADD29]](p5) :: (store 4 into %stack.0 + 108, align 256, addrspace 5)
; CHECK: [[C30:%[0-9]+]]:_(s32) = G_CONSTANT i32 112
; CHECK: [[PTR_ADD30:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C30]](s32)
; CHECK: G_STORE [[UV28]](s32), [[PTR_ADD30]](p5) :: (store 4 into %stack.0 + 112, align 256, addrspace 5)
; CHECK: [[C31:%[0-9]+]]:_(s32) = G_CONSTANT i32 116
; CHECK: [[PTR_ADD31:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C31]](s32)
; CHECK: G_STORE [[UV29]](s32), [[PTR_ADD31]](p5) :: (store 4 into %stack.0 + 116, align 256, addrspace 5)
; CHECK: [[C32:%[0-9]+]]:_(s32) = G_CONSTANT i32 120
; CHECK: [[PTR_ADD32:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C32]](s32)
; CHECK: G_STORE [[UV30]](s32), [[PTR_ADD32]](p5) :: (store 4 into %stack.0 + 120, align 256, addrspace 5)
; CHECK: [[C33:%[0-9]+]]:_(s32) = G_CONSTANT i32 124
; CHECK: [[PTR_ADD33:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C33]](s32)
; CHECK: G_STORE [[UV31]](s32), [[PTR_ADD33]](p5) :: (store 4 into %stack.0 + 124, align 256, addrspace 5)
; CHECK: [[C34:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
; CHECK: [[PTR_ADD34:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C34]](s32)
; CHECK: G_STORE [[UV32]](s32), [[PTR_ADD34]](p5) :: (store 4 into %stack.0 + 128, align 256, addrspace 5)
; CHECK: [[C35:%[0-9]+]]:_(s32) = G_CONSTANT i32 132
; CHECK: [[PTR_ADD35:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C35]](s32)
; CHECK: G_STORE [[UV33]](s32), [[PTR_ADD35]](p5) :: (store 4 into %stack.0 + 132, align 256, addrspace 5)
; CHECK: [[C36:%[0-9]+]]:_(s32) = G_CONSTANT i32 136
; CHECK: [[PTR_ADD36:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C36]](s32)
; CHECK: G_STORE [[UV34]](s32), [[PTR_ADD36]](p5) :: (store 4 into %stack.0 + 136, align 256, addrspace 5)
; CHECK: [[C37:%[0-9]+]]:_(s32) = G_CONSTANT i32 140
; CHECK: [[PTR_ADD37:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C37]](s32)
; CHECK: G_STORE [[UV35]](s32), [[PTR_ADD37]](p5) :: (store 4 into %stack.0 + 140, align 256, addrspace 5)
; CHECK: [[C38:%[0-9]+]]:_(s32) = G_CONSTANT i32 144
; CHECK: [[PTR_ADD38:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C38]](s32)
; CHECK: G_STORE [[UV36]](s32), [[PTR_ADD38]](p5) :: (store 4 into %stack.0 + 144, align 256, addrspace 5)
; CHECK: [[C39:%[0-9]+]]:_(s32) = G_CONSTANT i32 148
; CHECK: [[PTR_ADD39:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C39]](s32)
; CHECK: G_STORE [[UV37]](s32), [[PTR_ADD39]](p5) :: (store 4 into %stack.0 + 148, align 256, addrspace 5)
; CHECK: [[C40:%[0-9]+]]:_(s32) = G_CONSTANT i32 152
; CHECK: [[PTR_ADD40:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C40]](s32)
; CHECK: G_STORE [[UV38]](s32), [[PTR_ADD40]](p5) :: (store 4 into %stack.0 + 152, align 256, addrspace 5)
; CHECK: [[C41:%[0-9]+]]:_(s32) = G_CONSTANT i32 156
; CHECK: [[PTR_ADD41:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C41]](s32)
; CHECK: G_STORE [[UV39]](s32), [[PTR_ADD41]](p5) :: (store 4 into %stack.0 + 156, align 256, addrspace 5)
; CHECK: [[C42:%[0-9]+]]:_(s32) = G_CONSTANT i32 160
; CHECK: [[PTR_ADD42:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C42]](s32)
; CHECK: G_STORE [[UV40]](s32), [[PTR_ADD42]](p5) :: (store 4 into %stack.0 + 160, align 256, addrspace 5)
; CHECK: [[C43:%[0-9]+]]:_(s32) = G_CONSTANT i32 164
; CHECK: [[PTR_ADD43:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C43]](s32)
; CHECK: G_STORE [[UV41]](s32), [[PTR_ADD43]](p5) :: (store 4 into %stack.0 + 164, align 256, addrspace 5)
; CHECK: [[C44:%[0-9]+]]:_(s32) = G_CONSTANT i32 168
; CHECK: [[PTR_ADD44:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C44]](s32)
; CHECK: G_STORE [[UV42]](s32), [[PTR_ADD44]](p5) :: (store 4 into %stack.0 + 168, align 256, addrspace 5)
; CHECK: [[C45:%[0-9]+]]:_(s32) = G_CONSTANT i32 172
; CHECK: [[PTR_ADD45:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C45]](s32)
; CHECK: G_STORE [[UV43]](s32), [[PTR_ADD45]](p5) :: (store 4 into %stack.0 + 172, align 256, addrspace 5)
; CHECK: [[C46:%[0-9]+]]:_(s32) = G_CONSTANT i32 176
; CHECK: [[PTR_ADD46:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C46]](s32)
; CHECK: G_STORE [[UV44]](s32), [[PTR_ADD46]](p5) :: (store 4 into %stack.0 + 176, align 256, addrspace 5)
; CHECK: [[C47:%[0-9]+]]:_(s32) = G_CONSTANT i32 180
; CHECK: [[PTR_ADD47:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C47]](s32)
; CHECK: G_STORE [[UV45]](s32), [[PTR_ADD47]](p5) :: (store 4 into %stack.0 + 180, align 256, addrspace 5)
; CHECK: [[C48:%[0-9]+]]:_(s32) = G_CONSTANT i32 184
; CHECK: [[PTR_ADD48:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C48]](s32)
; CHECK: G_STORE [[UV46]](s32), [[PTR_ADD48]](p5) :: (store 4 into %stack.0 + 184, align 256, addrspace 5)
; CHECK: [[C49:%[0-9]+]]:_(s32) = G_CONSTANT i32 188
; CHECK: [[PTR_ADD49:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C49]](s32)
; CHECK: G_STORE [[UV47]](s32), [[PTR_ADD49]](p5) :: (store 4 into %stack.0 + 188, align 256, addrspace 5)
; CHECK: [[C50:%[0-9]+]]:_(s32) = G_CONSTANT i32 192
; CHECK: [[PTR_ADD50:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C50]](s32)
; CHECK: G_STORE [[UV48]](s32), [[PTR_ADD50]](p5) :: (store 4 into %stack.0 + 192, align 256, addrspace 5)
; CHECK: [[C51:%[0-9]+]]:_(s32) = G_CONSTANT i32 196
; CHECK: [[PTR_ADD51:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C51]](s32)
; CHECK: G_STORE [[UV49]](s32), [[PTR_ADD51]](p5) :: (store 4 into %stack.0 + 196, align 256, addrspace 5)
; CHECK: [[C52:%[0-9]+]]:_(s32) = G_CONSTANT i32 200
; CHECK: [[PTR_ADD52:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C52]](s32)
; CHECK: G_STORE [[UV50]](s32), [[PTR_ADD52]](p5) :: (store 4 into %stack.0 + 200, align 256, addrspace 5)
; CHECK: [[C53:%[0-9]+]]:_(s32) = G_CONSTANT i32 204
; CHECK: [[PTR_ADD53:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C53]](s32)
; CHECK: G_STORE [[UV51]](s32), [[PTR_ADD53]](p5) :: (store 4 into %stack.0 + 204, align 256, addrspace 5)
; CHECK: [[C54:%[0-9]+]]:_(s32) = G_CONSTANT i32 208
; CHECK: [[PTR_ADD54:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C54]](s32)
; CHECK: G_STORE [[UV52]](s32), [[PTR_ADD54]](p5) :: (store 4 into %stack.0 + 208, align 256, addrspace 5)
; CHECK: [[C55:%[0-9]+]]:_(s32) = G_CONSTANT i32 212
; CHECK: [[PTR_ADD55:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C55]](s32)
; CHECK: G_STORE [[UV53]](s32), [[PTR_ADD55]](p5) :: (store 4 into %stack.0 + 212, align 256, addrspace 5)
; CHECK: [[C56:%[0-9]+]]:_(s32) = G_CONSTANT i32 216
; CHECK: [[PTR_ADD56:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C56]](s32)
; CHECK: G_STORE [[UV54]](s32), [[PTR_ADD56]](p5) :: (store 4 into %stack.0 + 216, align 256, addrspace 5)
; CHECK: [[C57:%[0-9]+]]:_(s32) = G_CONSTANT i32 220
; CHECK: [[PTR_ADD57:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C57]](s32)
; CHECK: G_STORE [[UV55]](s32), [[PTR_ADD57]](p5) :: (store 4 into %stack.0 + 220, align 256, addrspace 5)
; CHECK: [[C58:%[0-9]+]]:_(s32) = G_CONSTANT i32 224
; CHECK: [[PTR_ADD58:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C58]](s32)
; CHECK: G_STORE [[UV56]](s32), [[PTR_ADD58]](p5) :: (store 4 into %stack.0 + 224, align 256, addrspace 5)
; CHECK: [[C59:%[0-9]+]]:_(s32) = G_CONSTANT i32 228
; CHECK: [[PTR_ADD59:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C59]](s32)
; CHECK: G_STORE [[UV57]](s32), [[PTR_ADD59]](p5) :: (store 4 into %stack.0 + 228, align 256, addrspace 5)
; CHECK: [[C60:%[0-9]+]]:_(s32) = G_CONSTANT i32 232
; CHECK: [[PTR_ADD60:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C60]](s32)
; CHECK: G_STORE [[UV58]](s32), [[PTR_ADD60]](p5) :: (store 4 into %stack.0 + 232, align 256, addrspace 5)
; CHECK: [[C61:%[0-9]+]]:_(s32) = G_CONSTANT i32 236
; CHECK: [[PTR_ADD61:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C61]](s32)
; CHECK: G_STORE [[UV59]](s32), [[PTR_ADD61]](p5) :: (store 4 into %stack.0 + 236, align 256, addrspace 5)
; CHECK: [[C62:%[0-9]+]]:_(s32) = G_CONSTANT i32 240
; CHECK: [[PTR_ADD62:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C62]](s32)
; CHECK: G_STORE [[UV60]](s32), [[PTR_ADD62]](p5) :: (store 4 into %stack.0 + 240, align 256, addrspace 5)
; CHECK: [[C63:%[0-9]+]]:_(s32) = G_CONSTANT i32 244
; CHECK: [[PTR_ADD63:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C63]](s32)
; CHECK: G_STORE [[UV61]](s32), [[PTR_ADD63]](p5) :: (store 4 into %stack.0 + 244, align 256, addrspace 5)
; CHECK: [[C64:%[0-9]+]]:_(s32) = G_CONSTANT i32 248
; CHECK: [[PTR_ADD64:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C64]](s32)
; CHECK: G_STORE [[UV62]](s32), [[PTR_ADD64]](p5) :: (store 4 into %stack.0 + 248, align 256, addrspace 5)
; CHECK: [[C65:%[0-9]+]]:_(s32) = G_CONSTANT i32 252
; CHECK: [[PTR_ADD65:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C65]](s32)
; CHECK: G_STORE [[UV63]](s32), [[PTR_ADD65]](p5) :: (store 4 into %stack.0 + 252, align 256, addrspace 5)
; CHECK: [[C66:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C66]]
; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND]], [[C3]]
; CHECK: [[PTR_ADD66:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[MUL]](s32)
; CHECK: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD66]](p5) :: (load 4, addrspace 5)
; CHECK: S_ENDPGM 0, implicit [[LOAD4]](s32)
%0:_(p1) = COPY $sgpr0_sgpr1
%1:_(s32) = COPY $sgpr2
%2:_(<64 x s32>) = G_LOAD %0 :: (load 256, align 4, addrspace 4)
%3:_(s32) = G_EXTRACT_VECTOR_ELT %2, %1
S_ENDPGM 0, implicit %3
...
---
# Variable-index G_EXTRACT_VECTOR_ELT of an s1 element from a <32 x s1>
# produced by bitcasting an s32.
# The checked legalization packs the 32 one-bit elements back into a single
# s32 scalar: each bit is isolated with G_LSHR by its position, masked with
# G_AND 1, repositioned with G_SHL, and merged with G_OR. The requested bit
# is then selected with a dynamic G_LSHR whose shift amount is
# (index & 31) << 0, and the result is copied out as s32 (anyext of the s1).
name: extract_vector_elt_v32s1_varidx_i32
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1
    ; CHECK-LABEL: name: extract_vector_elt_v32s1_varidx_i32
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
    ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
    ; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
    ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
    ; CHECK: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
    ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
    ; CHECK: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C3]](s32)
    ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
    ; CHECK: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C4]](s32)
    ; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
    ; CHECK: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C5]](s32)
    ; CHECK: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
    ; CHECK: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C6]](s32)
    ; CHECK: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
    ; CHECK: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C7]](s32)
    ; CHECK: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
    ; CHECK: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C8]](s32)
    ; CHECK: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
    ; CHECK: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C9]](s32)
    ; CHECK: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 11
    ; CHECK: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C10]](s32)
    ; CHECK: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
    ; CHECK: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C11]](s32)
    ; CHECK: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 13
    ; CHECK: [[LSHR12:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C12]](s32)
    ; CHECK: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 14
    ; CHECK: [[LSHR13:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C13]](s32)
    ; CHECK: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
    ; CHECK: [[LSHR14:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C14]](s32)
    ; CHECK: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; CHECK: [[LSHR15:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C15]](s32)
    ; CHECK: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 17
    ; CHECK: [[LSHR16:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C16]](s32)
    ; CHECK: [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 18
    ; CHECK: [[LSHR17:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C17]](s32)
    ; CHECK: [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 19
    ; CHECK: [[LSHR18:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C18]](s32)
    ; CHECK: [[C19:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
    ; CHECK: [[LSHR19:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C19]](s32)
    ; CHECK: [[C20:%[0-9]+]]:_(s32) = G_CONSTANT i32 21
    ; CHECK: [[LSHR20:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C20]](s32)
    ; CHECK: [[C21:%[0-9]+]]:_(s32) = G_CONSTANT i32 22
    ; CHECK: [[LSHR21:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C21]](s32)
    ; CHECK: [[C22:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
    ; CHECK: [[LSHR22:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C22]](s32)
    ; CHECK: [[C23:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
    ; CHECK: [[LSHR23:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C23]](s32)
    ; CHECK: [[C24:%[0-9]+]]:_(s32) = G_CONSTANT i32 25
    ; CHECK: [[LSHR24:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C24]](s32)
    ; CHECK: [[C25:%[0-9]+]]:_(s32) = G_CONSTANT i32 26
    ; CHECK: [[LSHR25:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C25]](s32)
    ; CHECK: [[C26:%[0-9]+]]:_(s32) = G_CONSTANT i32 27
    ; CHECK: [[LSHR26:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C26]](s32)
    ; CHECK: [[C27:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
    ; CHECK: [[LSHR27:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C27]](s32)
    ; CHECK: [[C28:%[0-9]+]]:_(s32) = G_CONSTANT i32 29
    ; CHECK: [[LSHR28:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C28]](s32)
    ; CHECK: [[C29:%[0-9]+]]:_(s32) = G_CONSTANT i32 30
    ; CHECK: [[LSHR29:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C29]](s32)
    ; CHECK: [[C30:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
    ; CHECK: [[LSHR30:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C30]](s32)
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
    ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]]
    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C]](s32)
    ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
    ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]]
    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C1]](s32)
    ; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
    ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
    ; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C]]
    ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C2]](s32)
    ; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]]
    ; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
    ; CHECK: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C]]
    ; CHECK: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND4]], [[C3]](s32)
    ; CHECK: [[OR3:%[0-9]+]]:_(s32) = G_OR [[OR2]], [[SHL3]]
    ; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32)
    ; CHECK: [[AND5:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C]]
    ; CHECK: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[C4]](s32)
    ; CHECK: [[OR4:%[0-9]+]]:_(s32) = G_OR [[OR3]], [[SHL4]]
    ; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32)
    ; CHECK: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY8]], [[C]]
    ; CHECK: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND6]], [[C5]](s32)
    ; CHECK: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]]
    ; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32)
    ; CHECK: [[AND7:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C]]
    ; CHECK: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C6]](s32)
    ; CHECK: [[OR6:%[0-9]+]]:_(s32) = G_OR [[OR5]], [[SHL6]]
    ; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32)
    ; CHECK: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C]]
    ; CHECK: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[AND8]], [[C7]](s32)
    ; CHECK: [[OR7:%[0-9]+]]:_(s32) = G_OR [[OR6]], [[SHL7]]
    ; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32)
    ; CHECK: [[AND9:%[0-9]+]]:_(s32) = G_AND [[COPY11]], [[C]]
    ; CHECK: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[AND9]], [[C8]](s32)
    ; CHECK: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]]
    ; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR9]](s32)
    ; CHECK: [[AND10:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C]]
    ; CHECK: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[AND10]], [[C9]](s32)
    ; CHECK: [[OR9:%[0-9]+]]:_(s32) = G_OR [[OR8]], [[SHL9]]
    ; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32)
    ; CHECK: [[AND11:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C]]
    ; CHECK: [[SHL10:%[0-9]+]]:_(s32) = G_SHL [[AND11]], [[C10]](s32)
    ; CHECK: [[OR10:%[0-9]+]]:_(s32) = G_OR [[OR9]], [[SHL10]]
    ; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LSHR11]](s32)
    ; CHECK: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY14]], [[C]]
    ; CHECK: [[SHL11:%[0-9]+]]:_(s32) = G_SHL [[AND12]], [[C11]](s32)
    ; CHECK: [[OR11:%[0-9]+]]:_(s32) = G_OR [[OR10]], [[SHL11]]
    ; CHECK: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR12]](s32)
    ; CHECK: [[AND13:%[0-9]+]]:_(s32) = G_AND [[COPY15]], [[C]]
    ; CHECK: [[SHL12:%[0-9]+]]:_(s32) = G_SHL [[AND13]], [[C12]](s32)
    ; CHECK: [[OR12:%[0-9]+]]:_(s32) = G_OR [[OR11]], [[SHL12]]
    ; CHECK: [[COPY16:%[0-9]+]]:_(s32) = COPY [[LSHR13]](s32)
    ; CHECK: [[AND14:%[0-9]+]]:_(s32) = G_AND [[COPY16]], [[C]]
    ; CHECK: [[SHL13:%[0-9]+]]:_(s32) = G_SHL [[AND14]], [[C13]](s32)
    ; CHECK: [[OR13:%[0-9]+]]:_(s32) = G_OR [[OR12]], [[SHL13]]
    ; CHECK: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR14]](s32)
    ; CHECK: [[AND15:%[0-9]+]]:_(s32) = G_AND [[COPY17]], [[C]]
    ; CHECK: [[SHL14:%[0-9]+]]:_(s32) = G_SHL [[AND15]], [[C14]](s32)
    ; CHECK: [[OR14:%[0-9]+]]:_(s32) = G_OR [[OR13]], [[SHL14]]
    ; CHECK: [[COPY18:%[0-9]+]]:_(s32) = COPY [[LSHR15]](s32)
    ; CHECK: [[AND16:%[0-9]+]]:_(s32) = G_AND [[COPY18]], [[C]]
    ; CHECK: [[SHL15:%[0-9]+]]:_(s32) = G_SHL [[AND16]], [[C15]](s32)
    ; CHECK: [[OR15:%[0-9]+]]:_(s32) = G_OR [[OR14]], [[SHL15]]
    ; CHECK: [[COPY19:%[0-9]+]]:_(s32) = COPY [[LSHR16]](s32)
    ; CHECK: [[AND17:%[0-9]+]]:_(s32) = G_AND [[COPY19]], [[C]]
    ; CHECK: [[SHL16:%[0-9]+]]:_(s32) = G_SHL [[AND17]], [[C16]](s32)
    ; CHECK: [[OR16:%[0-9]+]]:_(s32) = G_OR [[OR15]], [[SHL16]]
    ; CHECK: [[COPY20:%[0-9]+]]:_(s32) = COPY [[LSHR17]](s32)
    ; CHECK: [[AND18:%[0-9]+]]:_(s32) = G_AND [[COPY20]], [[C]]
    ; CHECK: [[SHL17:%[0-9]+]]:_(s32) = G_SHL [[AND18]], [[C17]](s32)
    ; CHECK: [[OR17:%[0-9]+]]:_(s32) = G_OR [[OR16]], [[SHL17]]
    ; CHECK: [[COPY21:%[0-9]+]]:_(s32) = COPY [[LSHR18]](s32)
    ; CHECK: [[AND19:%[0-9]+]]:_(s32) = G_AND [[COPY21]], [[C]]
    ; CHECK: [[SHL18:%[0-9]+]]:_(s32) = G_SHL [[AND19]], [[C18]](s32)
    ; CHECK: [[OR18:%[0-9]+]]:_(s32) = G_OR [[OR17]], [[SHL18]]
    ; CHECK: [[COPY22:%[0-9]+]]:_(s32) = COPY [[LSHR19]](s32)
    ; CHECK: [[AND20:%[0-9]+]]:_(s32) = G_AND [[COPY22]], [[C]]
    ; CHECK: [[SHL19:%[0-9]+]]:_(s32) = G_SHL [[AND20]], [[C19]](s32)
    ; CHECK: [[OR19:%[0-9]+]]:_(s32) = G_OR [[OR18]], [[SHL19]]
    ; CHECK: [[COPY23:%[0-9]+]]:_(s32) = COPY [[LSHR20]](s32)
    ; CHECK: [[AND21:%[0-9]+]]:_(s32) = G_AND [[COPY23]], [[C]]
    ; CHECK: [[SHL20:%[0-9]+]]:_(s32) = G_SHL [[AND21]], [[C20]](s32)
    ; CHECK: [[OR20:%[0-9]+]]:_(s32) = G_OR [[OR19]], [[SHL20]]
    ; CHECK: [[COPY24:%[0-9]+]]:_(s32) = COPY [[LSHR21]](s32)
    ; CHECK: [[AND22:%[0-9]+]]:_(s32) = G_AND [[COPY24]], [[C]]
    ; CHECK: [[SHL21:%[0-9]+]]:_(s32) = G_SHL [[AND22]], [[C21]](s32)
    ; CHECK: [[OR21:%[0-9]+]]:_(s32) = G_OR [[OR20]], [[SHL21]]
    ; CHECK: [[COPY25:%[0-9]+]]:_(s32) = COPY [[LSHR22]](s32)
    ; CHECK: [[AND23:%[0-9]+]]:_(s32) = G_AND [[COPY25]], [[C]]
    ; CHECK: [[SHL22:%[0-9]+]]:_(s32) = G_SHL [[AND23]], [[C22]](s32)
    ; CHECK: [[OR22:%[0-9]+]]:_(s32) = G_OR [[OR21]], [[SHL22]]
    ; CHECK: [[COPY26:%[0-9]+]]:_(s32) = COPY [[LSHR23]](s32)
    ; CHECK: [[AND24:%[0-9]+]]:_(s32) = G_AND [[COPY26]], [[C]]
    ; CHECK: [[SHL23:%[0-9]+]]:_(s32) = G_SHL [[AND24]], [[C23]](s32)
    ; CHECK: [[OR23:%[0-9]+]]:_(s32) = G_OR [[OR22]], [[SHL23]]
    ; CHECK: [[COPY27:%[0-9]+]]:_(s32) = COPY [[LSHR24]](s32)
    ; CHECK: [[AND25:%[0-9]+]]:_(s32) = G_AND [[COPY27]], [[C]]
    ; CHECK: [[SHL24:%[0-9]+]]:_(s32) = G_SHL [[AND25]], [[C24]](s32)
    ; CHECK: [[OR24:%[0-9]+]]:_(s32) = G_OR [[OR23]], [[SHL24]]
    ; CHECK: [[COPY28:%[0-9]+]]:_(s32) = COPY [[LSHR25]](s32)
    ; CHECK: [[AND26:%[0-9]+]]:_(s32) = G_AND [[COPY28]], [[C]]
    ; CHECK: [[SHL25:%[0-9]+]]:_(s32) = G_SHL [[AND26]], [[C25]](s32)
    ; CHECK: [[OR25:%[0-9]+]]:_(s32) = G_OR [[OR24]], [[SHL25]]
    ; CHECK: [[COPY29:%[0-9]+]]:_(s32) = COPY [[LSHR26]](s32)
    ; CHECK: [[AND27:%[0-9]+]]:_(s32) = G_AND [[COPY29]], [[C]]
    ; CHECK: [[SHL26:%[0-9]+]]:_(s32) = G_SHL [[AND27]], [[C26]](s32)
    ; CHECK: [[OR26:%[0-9]+]]:_(s32) = G_OR [[OR25]], [[SHL26]]
    ; CHECK: [[COPY30:%[0-9]+]]:_(s32) = COPY [[LSHR27]](s32)
    ; CHECK: [[AND28:%[0-9]+]]:_(s32) = G_AND [[COPY30]], [[C]]
    ; CHECK: [[SHL27:%[0-9]+]]:_(s32) = G_SHL [[AND28]], [[C27]](s32)
    ; CHECK: [[OR27:%[0-9]+]]:_(s32) = G_OR [[OR26]], [[SHL27]]
    ; CHECK: [[COPY31:%[0-9]+]]:_(s32) = COPY [[LSHR28]](s32)
    ; CHECK: [[AND29:%[0-9]+]]:_(s32) = G_AND [[COPY31]], [[C]]
    ; CHECK: [[SHL28:%[0-9]+]]:_(s32) = G_SHL [[AND29]], [[C28]](s32)
    ; CHECK: [[OR28:%[0-9]+]]:_(s32) = G_OR [[OR27]], [[SHL28]]
    ; CHECK: [[COPY32:%[0-9]+]]:_(s32) = COPY [[LSHR29]](s32)
    ; CHECK: [[AND30:%[0-9]+]]:_(s32) = G_AND [[COPY32]], [[C]]
    ; CHECK: [[SHL29:%[0-9]+]]:_(s32) = G_SHL [[AND30]], [[C29]](s32)
    ; CHECK: [[OR29:%[0-9]+]]:_(s32) = G_OR [[OR28]], [[SHL29]]
    ; CHECK: [[COPY33:%[0-9]+]]:_(s32) = COPY [[LSHR30]](s32)
    ; CHECK: [[AND31:%[0-9]+]]:_(s32) = G_AND [[COPY33]], [[C]]
    ; CHECK: [[SHL30:%[0-9]+]]:_(s32) = G_SHL [[AND31]], [[C30]](s32)
    ; CHECK: [[OR30:%[0-9]+]]:_(s32) = G_OR [[OR29]], [[SHL30]]
    ; CHECK: [[AND32:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C30]]
    ; CHECK: [[C31:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; CHECK: [[SHL31:%[0-9]+]]:_(s32) = G_SHL [[AND32]], [[C31]](s32)
    ; CHECK: [[LSHR31:%[0-9]+]]:_(s32) = G_LSHR [[OR30]], [[SHL31]](s32)
    ; CHECK: [[COPY34:%[0-9]+]]:_(s32) = COPY [[LSHR31]](s32)
    ; CHECK: $vgpr0 = COPY [[COPY34]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(<32 x s1>) = G_BITCAST %0
    %3:_(s1) = G_EXTRACT_VECTOR_ELT %2, %1
    %4:_(s32) = G_ANYEXT %3
    $vgpr0 = COPY %4
...
---
# Variable-index G_EXTRACT_VECTOR_ELT of an s8 element from a <12 x s8>
# produced by bitcasting a <3 x s32>.
# The checked legalization repacks the twelve bytes into a <3 x s32>
# G_BUILD_VECTOR (per-byte G_LSHR / G_AND 255 / G_SHL / G_OR on each
# unmerged word), selects the containing 32-bit word with a dynamic
# G_EXTRACT_VECTOR_ELT using index >> 2, then shifts that word right by
# (index & 3) << 3 (i.e. byte offset * 8) to place the byte at bit 0.
name: extract_vector_elt_v12s8_varidx_s32
body: |
  bb.0:
    liveins: $vgpr0_vgpr1_vgpr2, $vgpr3
    ; CHECK-LABEL: name: extract_vector_elt_v12s8_varidx_s32
    ; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<3 x s32>)
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
    ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV]], [[C]](s32)
    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[UV]], [[C1]](s32)
    ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
    ; CHECK: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[UV]], [[C2]](s32)
    ; CHECK: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C]](s32)
    ; CHECK: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C1]](s32)
    ; CHECK: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[UV1]], [[C2]](s32)
    ; CHECK: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C]](s32)
    ; CHECK: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C1]](s32)
    ; CHECK: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[UV2]], [[C2]](s32)
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr3
    ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C3]]
    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
    ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C3]]
    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C]](s32)
    ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
    ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C3]]
    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C1]](s32)
    ; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
    ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
    ; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]]
    ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C2]](s32)
    ; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]]
    ; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
    ; CHECK: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C3]]
    ; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
    ; CHECK: [[AND5:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C3]]
    ; CHECK: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[C]](s32)
    ; CHECK: [[OR3:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL3]]
    ; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32)
    ; CHECK: [[AND6:%[0-9]+]]:_(s32) = G_AND [[COPY8]], [[C3]]
    ; CHECK: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[AND6]], [[C1]](s32)
    ; CHECK: [[OR4:%[0-9]+]]:_(s32) = G_OR [[OR3]], [[SHL4]]
    ; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32)
    ; CHECK: [[AND7:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C3]]
    ; CHECK: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND7]], [[C2]](s32)
    ; CHECK: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[SHL5]]
    ; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
    ; CHECK: [[AND8:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C3]]
    ; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32)
    ; CHECK: [[AND9:%[0-9]+]]:_(s32) = G_AND [[COPY11]], [[C3]]
    ; CHECK: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[AND9]], [[C]](s32)
    ; CHECK: [[OR6:%[0-9]+]]:_(s32) = G_OR [[AND8]], [[SHL6]]
    ; CHECK: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32)
    ; CHECK: [[AND10:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C3]]
    ; CHECK: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[AND10]], [[C1]](s32)
    ; CHECK: [[OR7:%[0-9]+]]:_(s32) = G_OR [[OR6]], [[SHL7]]
    ; CHECK: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR8]](s32)
    ; CHECK: [[AND11:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C3]]
    ; CHECK: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[AND11]], [[C2]](s32)
    ; CHECK: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR7]], [[SHL8]]
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[OR2]](s32), [[OR5]](s32), [[OR8]](s32)
    ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
    ; CHECK: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C4]](s32)
    ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[BUILD_VECTOR]](<3 x s32>), [[LSHR9]](s32)
    ; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
    ; CHECK: [[AND12:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C5]]
    ; CHECK: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[AND12]], [[C5]](s32)
    ; CHECK: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[EVEC]], [[SHL9]](s32)
    ; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LSHR10]](s32)
    ; CHECK: $vgpr0 = COPY [[COPY14]](s32)
    %0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
    %1:_(<12 x s8>) = G_BITCAST %0
    %2:_(s32) = COPY $vgpr3
    %3:_(s8) = G_EXTRACT_VECTOR_ELT %1, %2
    %4:_(s32) = G_ANYEXT %3
    $vgpr0 = COPY %4
...
---
# Variable-index G_EXTRACT_VECTOR_ELT of an s8 element from a <3 x s8>
# produced by bitcasting an s24 (itself a G_TRUNC of an s32).
# The checked legalization widens each byte lane to s32 via
# G_SEXT_INREG ..., 8, builds a <3 x s32> G_BUILD_VECTOR, and performs the
# dynamic extract directly on that widened vector with the original index.
name: extract_vector_elt_v3s8_varidx_s32
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1
    ; CHECK-LABEL: name: extract_vector_elt_v3s8_varidx_s32
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
    ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
    ; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
    ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
    ; CHECK: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
    ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY5]], 8
    ; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY [[COPY3]](s32)
    ; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY6]], 8
    ; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY [[COPY4]](s32)
    ; CHECK: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY7]], 8
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[SEXT_INREG]](s32), [[SEXT_INREG1]](s32), [[SEXT_INREG2]](s32)
    ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[BUILD_VECTOR]](<3 x s32>), [[COPY1]](s32)
    ; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY [[EVEC]](s32)
    ; CHECK: $vgpr0 = COPY [[COPY8]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s24) = G_TRUNC %0
    %3:_(<3 x s8>) = G_BITCAST %2
    %4:_(s8) = G_EXTRACT_VECTOR_ELT %3, %1
    %5:_(s32) = G_ANYEXT %4
    $vgpr0 = COPY %5
...