[GlobalISel] Avoid making G_PTR_ADD with nullptr

When the first operand is a null pointer, we can avoid emitting a G_PTR_ADD and
instead emit a G_INTTOPTR of the offset operand.
This avoids generating an add with 0 later on for targets such as AMDGPU.
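
For illustration, a minimal before/after sketch in generic MIR (virtual register
numbers are hypothetical; the real patterns are exercised by the new test below):

  %0:_(s32) = COPY $vgpr0
  %1:_(p3) = G_CONSTANT i32 0
  %2:_(p3) = G_PTR_ADD %1, %0(s32)

becomes

  %0:_(s32) = COPY $vgpr0
  %2:_(p3) = G_INTTOPTR %0(s32)

The same rewrite applies when the base is a G_BUILD_VECTOR of null pointers.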

Differential Revision: https://reviews.llvm.org/D87140
Mirko Brkusanin 2020-10-13 12:54:59 +02:00
parent fb2627d8d2
commit 52ba4fa6aa
7 changed files with 197 additions and 113 deletions


@@ -423,6 +423,10 @@ public:
                                std::pair<Register, Register> &MatchInfo);
  ///}

  /// Combine G_PTR_ADD with nullptr to G_INTTOPTR
  bool matchPtrAddZero(MachineInstr &MI);
  bool applyPtrAddZero(MachineInstr &MI);

  /// Try to transform \p MI by using all of the above
  /// combine functions. Returns true if changed.
  bool tryCombine(MachineInstr &MI);


@@ -490,6 +490,13 @@ def xor_of_and_with_same_reg: GICombineRule <
  (apply [{ return Helper.applyXorOfAndWithSameReg(*${root}, ${matchinfo}); }])
>;

// Transform (ptr_add 0, x) -> (int_to_ptr x)
def ptr_add_with_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_PTR_ADD):$root,
         [{ return Helper.matchPtrAddZero(*${root}); }]),
  (apply [{ return Helper.applyPtrAddZero(*${root}); }])>;

// FIXME: These should use the custom predicate feature once it lands.
def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
                                     undef_to_negative_one,
@@ -525,4 +532,4 @@ def all_combines : GICombineGroup<[trivial_combines, ptr_add_immed_chain,
    not_cmp_fold, opt_brcond_by_inverting_cond,
    unmerge_merge, fabs_fabs_fold, unmerge_cst, unmerge_dead_to_trunc,
    unmerge_zext_to_zext, trunc_ext_fold, trunc_shl,
    constant_fp_op, xor_of_and_with_same_reg]>;
    constant_fp_op, xor_of_and_with_same_reg, ptr_add_with_zero]>;


@@ -2766,6 +2766,32 @@ bool CombinerHelper::applyXorOfAndWithSameReg(
  return true;
}

bool CombinerHelper::matchPtrAddZero(MachineInstr &MI) {
  Register DstReg = MI.getOperand(0).getReg();
  LLT Ty = MRI.getType(DstReg);
  const DataLayout &DL = Builder.getMF().getDataLayout();

  // Bail out for non-integral address spaces, where inttoptr is not a valid
  // way to materialize a pointer.
  if (DL.isNonIntegralAddressSpace(Ty.getScalarType().getAddressSpace()))
    return false;

  if (Ty.isPointer()) {
    // Scalar case: the base pointer must be a constant zero (null).
    auto ConstVal = getConstantVRegVal(MI.getOperand(1).getReg(), MRI);
    return ConstVal && *ConstVal == 0;
  }

  assert(Ty.isVector() && "Expecting a vector type");
  // Vector case: the base must be a build_vector of all-null pointers.
  const MachineInstr *VecMI = MRI.getVRegDef(MI.getOperand(1).getReg());
  return isBuildVectorAllZeros(*VecMI, MRI);
}

bool CombinerHelper::applyPtrAddZero(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD);
  Builder.setInstrAndDebugLoc(MI);
  // Rewrite (ptr_add null, x) as (int_to_ptr x).
  Builder.buildIntToPtr(MI.getOperand(0), MI.getOperand(2));
  MI.eraseFromParent();
  return true;
}

bool CombinerHelper::tryCombine(MachineInstr &MI) {
  if (tryCombineCopy(MI))
    return true;


@@ -0,0 +1,76 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-amd-amdhsa -run-pass=amdgpu-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s

---
name: add_nullptr_shl_add
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $sgpr0
    ; CHECK-LABEL: name: add_nullptr_shl_add
    ; CHECK: liveins: $sgpr0
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr0
    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s32)
    ; CHECK: $vgpr0 = COPY [[SHL]](s32)
    %0:_(s32) = COPY $sgpr0
    %1:_(s32) = G_CONSTANT i32 3
    %2:_(s32) = G_SHL %0, %1(s32)
    %3:_(p3) = G_CONSTANT i32 0
    %4:_(p3) = G_PTR_ADD %3, %2(s32)
    %5:_(s32) = G_PTRTOINT %4(p3)
    $vgpr0 = COPY %5(s32)
...
---
name: add_nullptr_mul_add
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $vgpr0, $vgpr1
    ; CHECK-LABEL: name: add_nullptr_mul_add
    ; CHECK: liveins: $vgpr0, $vgpr1
    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
    ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY1]]
    ; CHECK: $vgpr0 = COPY [[MUL]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(p3) = G_CONSTANT i32 0
    %3:_(s32) = G_MUL %0:_, %1:_
    %4:_(p3) = G_PTR_ADD %2:_, %3:_(s32)
    %5:_(s32) = G_PTRTOINT %4:_(p3)
    $vgpr0 = COPY %5:_(s32)
...
---
name: add_nullptr_vec_all_zero
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $vgpr0_vgpr1, $vgpr2, $vgpr3
    ; CHECK-LABEL: name: add_nullptr_vec_all_zero
    ; CHECK: liveins: $vgpr0_vgpr1, $vgpr2, $vgpr3
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr3
    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY1]](s32), [[COPY2]](s32)
    ; CHECK: [[SHL:%[0-9]+]]:_(<2 x s32>) = G_SHL [[COPY]], [[BUILD_VECTOR]](<2 x s32>)
    ; CHECK: $vgpr0_vgpr1 = COPY [[SHL]](<2 x s32>)
    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
    %1:_(s32) = COPY $vgpr2
    %2:_(s32) = COPY $vgpr3
    %3:_(<2 x s32>) = G_BUILD_VECTOR %1:_(s32), %2:_(s32)
    %4:_(<2 x s32>) = G_SHL %0, %3(<2 x s32>)
    %5:_(p3) = G_CONSTANT i32 0
    %6:_(<2 x p3>) = G_BUILD_VECTOR %5:_(p3), %5:_(p3)
    %7:_(<2 x p3>) = G_PTR_ADD %6, %4(<2 x s32>)
    %8:_(<2 x s32>) = G_PTRTOINT %7(<2 x p3>)
    $vgpr0_vgpr1 = COPY %8(<2 x s32>)
...


@@ -1987,25 +1987,23 @@ define amdgpu_ps void @insertelement_s_v16i16_s_s(<16 x i16> addrspace(4)* inreg
; GFX9-NEXT: s_cmp_eq_u32 s7, 4
; GFX9-NEXT: s_cselect_b32 s4, s16, s12
; GFX9-NEXT: s_cmp_eq_u32 s7, 5
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: s_cselect_b32 s5, s16, s13
; GFX9-NEXT: s_cmp_eq_u32 s7, 6
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: s_cselect_b32 s6, s16, s14
; GFX9-NEXT: s_cmp_eq_u32 s7, 7
; GFX9-NEXT: s_cselect_b32 s7, s16, s15
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: v_mov_b32_e32 v2, s2
; GFX9-NEXT: v_mov_b32_e32 v3, s3
; GFX9-NEXT: s_add_u32 s0, 0, 16
; GFX9-NEXT: v_mov_b32_e32 v5, 0
; GFX9-NEXT: s_addc_u32 s1, 0, 0
; GFX9-NEXT: global_store_dwordx4 v[4:5], v[0:3], off
; GFX9-NEXT: v_mov_b32_e32 v5, s1
; GFX9-NEXT: s_cselect_b32 s7, s16, s15
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v4, 16
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s6
; GFX9-NEXT: v_mov_b32_e32 v3, s7
; GFX9-NEXT: v_mov_b32_e32 v4, s0
; GFX9-NEXT: v_mov_b32_e32 v5, 0
; GFX9-NEXT: global_store_dwordx4 v[4:5], v[0:3], off
; GFX9-NEXT: s_endpgm
;
@@ -2048,25 +2046,23 @@ define amdgpu_ps void @insertelement_s_v16i16_s_s(<16 x i16> addrspace(4)* inreg
; GFX8-NEXT: s_cmp_eq_u32 s7, 4
; GFX8-NEXT: s_cselect_b32 s4, s16, s12
; GFX8-NEXT: s_cmp_eq_u32 s7, 5
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: s_cselect_b32 s5, s16, s13
; GFX8-NEXT: s_cmp_eq_u32 s7, 6
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: s_cselect_b32 s6, s16, s14
; GFX8-NEXT: s_cmp_eq_u32 s7, 7
; GFX8-NEXT: s_cselect_b32 s7, s16, s15
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: v_mov_b32_e32 v2, s2
; GFX8-NEXT: v_mov_b32_e32 v3, s3
; GFX8-NEXT: s_add_u32 s0, 0, 16
; GFX8-NEXT: v_mov_b32_e32 v5, 0
; GFX8-NEXT: s_addc_u32 s1, 0, 0
; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX8-NEXT: v_mov_b32_e32 v5, s1
; GFX8-NEXT: s_cselect_b32 s7, s16, s15
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: v_mov_b32_e32 v4, 16
; GFX8-NEXT: v_mov_b32_e32 v1, s5
; GFX8-NEXT: v_mov_b32_e32 v2, s6
; GFX8-NEXT: v_mov_b32_e32 v3, s7
; GFX8-NEXT: v_mov_b32_e32 v4, s0
; GFX8-NEXT: v_mov_b32_e32 v5, 0
; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GFX8-NEXT: s_endpgm
;
@@ -2119,14 +2115,14 @@ define amdgpu_ps void @insertelement_s_v16i16_s_s(<16 x i16> addrspace(4)* inreg
; GFX7-NEXT: v_mov_b32_e32 v3, s3
; GFX7-NEXT: s_mov_b32 s10, -1
; GFX7-NEXT: s_mov_b32 s11, 0xf000
; GFX7-NEXT: s_cselect_b32 s7, s16, s15
; GFX7-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0
; GFX7-NEXT: s_nop 0
; GFX7-NEXT: s_cselect_b32 s7, s16, s15
; GFX7-NEXT: v_mov_b32_e32 v0, s4
; GFX7-NEXT: s_mov_b64 s[8:9], 16
; GFX7-NEXT: v_mov_b32_e32 v1, s5
; GFX7-NEXT: v_mov_b32_e32 v2, s6
; GFX7-NEXT: v_mov_b32_e32 v3, s7
; GFX7-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0 offset:16
; GFX7-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0
; GFX7-NEXT: s_endpgm
%vec = load <16 x i16>, <16 x i16> addrspace(4)* %ptr
%insert = insertelement <16 x i16> %vec, i16 %val, i32 %idx
@@ -2167,19 +2163,17 @@ define amdgpu_ps void @insertelement_v_v16i16_s_s(<16 x i16> addrspace(1)* %ptr,
; GFX9-NEXT: v_and_or_b32 v10, v1, s13, v0
; GFX9-NEXT: v_cmp_eq_u32_e64 s[12:13], s12, 0
; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, v10, s[12:13]
; GFX9-NEXT: v_cndmask_b32_e64 v2, v4, v10, s[0:1]
; GFX9-NEXT: s_add_u32 s0, 0, 16
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v10, vcc
; GFX9-NEXT: v_cndmask_b32_e64 v2, v4, v10, s[0:1]
; GFX9-NEXT: v_cndmask_b32_e64 v3, v5, v10, s[2:3]
; GFX9-NEXT: v_cndmask_b32_e64 v4, v6, v10, s[4:5]
; GFX9-NEXT: v_cndmask_b32_e64 v5, v7, v10, s[6:7]
; GFX9-NEXT: v_cndmask_b32_e64 v6, v8, v10, s[8:9]
; GFX9-NEXT: v_cndmask_b32_e64 v7, v9, v10, s[10:11]
; GFX9-NEXT: s_addc_u32 s1, 0, 0
; GFX9-NEXT: v_mov_b32_e32 v8, 0
; GFX9-NEXT: v_mov_b32_e32 v11, s1
; GFX9-NEXT: v_mov_b32_e32 v10, 16
; GFX9-NEXT: v_mov_b32_e32 v9, 0
; GFX9-NEXT: v_mov_b32_e32 v10, s0
; GFX9-NEXT: v_mov_b32_e32 v11, 0
; GFX9-NEXT: global_store_dwordx4 v[8:9], v[0:3], off
; GFX9-NEXT: global_store_dwordx4 v[10:11], v[4:7], off
; GFX9-NEXT: s_endpgm
@@ -2205,6 +2199,8 @@ define amdgpu_ps void @insertelement_v_v16i16_s_s(<16 x i16> addrspace(1)* %ptr,
; GFX8-NEXT: v_cmp_eq_u32_e64 s[6:7], s12, 5
; GFX8-NEXT: v_cmp_eq_u32_e64 s[8:9], s12, 6
; GFX8-NEXT: v_cmp_eq_u32_e64 s[10:11], s12, 7
; GFX8-NEXT: v_mov_b32_e32 v10, 16
; GFX8-NEXT: v_mov_b32_e32 v11, 0
; GFX8-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
; GFX8-NEXT: v_cndmask_b32_e32 v8, v0, v1, vcc
; GFX8-NEXT: v_cndmask_b32_e64 v8, v8, v2, s[0:1]
@@ -2217,20 +2213,16 @@ define amdgpu_ps void @insertelement_v_v16i16_s_s(<16 x i16> addrspace(1)* %ptr,
; GFX8-NEXT: v_and_b32_e32 v8, s14, v8
; GFX8-NEXT: v_or_b32_e32 v8, s13, v8
; GFX8-NEXT: v_cmp_eq_u32_e64 s[12:13], s12, 0
; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, v8, s[0:1]
; GFX8-NEXT: s_add_u32 s0, 0, 16
; GFX8-NEXT: s_addc_u32 s1, 0, 0
; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, v8, s[12:13]
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc
; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, v8, s[0:1]
; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, v8, s[2:3]
; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, v8, s[4:5]
; GFX8-NEXT: v_cndmask_b32_e64 v5, v5, v8, s[6:7]
; GFX8-NEXT: v_cndmask_b32_e64 v6, v6, v8, s[8:9]
; GFX8-NEXT: v_cndmask_b32_e64 v7, v7, v8, s[10:11]
; GFX8-NEXT: v_mov_b32_e32 v8, 0
; GFX8-NEXT: v_mov_b32_e32 v11, s1
; GFX8-NEXT: v_mov_b32_e32 v9, 0
; GFX8-NEXT: v_mov_b32_e32 v10, s0
; GFX8-NEXT: flat_store_dwordx4 v[8:9], v[0:3]
; GFX8-NEXT: flat_store_dwordx4 v[10:11], v[4:7]
; GFX8-NEXT: s_endpgm
@@ -2276,10 +2268,11 @@ define amdgpu_ps void @insertelement_v_v16i16_s_s(<16 x i16> addrspace(1)* %ptr,
; GFX7-NEXT: v_cndmask_b32_e64 v3, v5, v10, s[2:3]
; GFX7-NEXT: v_cndmask_b32_e64 v4, v6, v10, s[4:5]
; GFX7-NEXT: v_cndmask_b32_e64 v5, v7, v10, s[6:7]
; GFX7-NEXT: buffer_store_dwordx4 v[0:3], off, s[16:19], 0
; GFX7-NEXT: v_cndmask_b32_e64 v6, v8, v10, s[8:9]
; GFX7-NEXT: v_cndmask_b32_e64 v7, v9, v10, s[10:11]
; GFX7-NEXT: buffer_store_dwordx4 v[0:3], off, s[16:19], 0
; GFX7-NEXT: buffer_store_dwordx4 v[4:7], off, s[16:19], 0 offset:16
; GFX7-NEXT: s_mov_b64 s[16:17], 16
; GFX7-NEXT: buffer_store_dwordx4 v[4:7], off, s[16:19], 0
; GFX7-NEXT: s_endpgm
%vec = load <16 x i16>, <16 x i16> addrspace(1)* %ptr
%insert = insertelement <16 x i16> %vec, i16 %val, i32 %idx
@@ -2340,12 +2333,11 @@ define amdgpu_ps void @insertelement_s_v16i16_v_s(<16 x i16> addrspace(4)* inreg
; GFX9-NEXT: v_cndmask_b32_e64 v4, v4, v8, s[0:1]
; GFX9-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc
; GFX9-NEXT: v_mov_b32_e32 v8, 0
; GFX9-NEXT: s_add_u32 s0, 0, 16
; GFX9-NEXT: v_mov_b32_e32 v9, 0
; GFX9-NEXT: global_store_dwordx4 v[8:9], v[0:3], off
; GFX9-NEXT: s_addc_u32 s1, 0, 0
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: v_mov_b32_e32 v0, 16
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_store_dwordx4 v[0:1], v[4:7], off
; GFX9-NEXT: s_endpgm
;
@@ -2401,12 +2393,11 @@ define amdgpu_ps void @insertelement_s_v16i16_v_s(<16 x i16> addrspace(4)* inreg
; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, v8, s[0:1]
; GFX8-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc
; GFX8-NEXT: v_mov_b32_e32 v8, 0
; GFX8-NEXT: s_add_u32 s0, 0, 16
; GFX8-NEXT: v_mov_b32_e32 v9, 0
; GFX8-NEXT: flat_store_dwordx4 v[8:9], v[0:3]
; GFX8-NEXT: s_addc_u32 s1, 0, 0
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: s_nop 0
; GFX8-NEXT: v_mov_b32_e32 v0, 16
; GFX8-NEXT: v_mov_b32_e32 v1, 0
; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
; GFX8-NEXT: s_endpgm
;
@@ -2463,9 +2454,10 @@ define amdgpu_ps void @insertelement_s_v16i16_v_s(<16 x i16> addrspace(4)* inreg
; GFX7-NEXT: s_mov_b64 s[0:1], 0
; GFX7-NEXT: s_mov_b32 s2, -1
; GFX7-NEXT: s_mov_b32 s3, 0xf000
; GFX7-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc
; GFX7-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; GFX7-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
; GFX7-NEXT: v_cndmask_b32_e32 v7, v7, v8, vcc
; GFX7-NEXT: s_mov_b64 s[0:1], 16
; GFX7-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0
; GFX7-NEXT: s_endpgm
%vec = load <16 x i16>, <16 x i16> addrspace(4)* %ptr
%insert = insertelement <16 x i16> %vec, i16 %val, i32 %idx
@@ -2518,21 +2510,20 @@ define amdgpu_ps void @insertelement_s_v16i16_s_v(<16 x i16> addrspace(4)* inreg
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_cmp_eq_u32_e64 s[12:13], 0, v8
; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, v9, s[0:1]
; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, v9, s[12:13]
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, v9, s[0:1]
; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, v9, s[2:3]
; GFX9-NEXT: v_cndmask_b32_e64 v4, v4, v9, s[14:15]
; GFX9-NEXT: v_cndmask_b32_e64 v5, v5, v9, s[6:7]
; GFX9-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[8:9]
; GFX9-NEXT: v_cndmask_b32_e64 v7, v7, v9, s[10:11]
; GFX9-NEXT: v_mov_b32_e32 v8, 0
; GFX9-NEXT: s_add_u32 s0, 0, 16
; GFX9-NEXT: v_mov_b32_e32 v9, 0
; GFX9-NEXT: global_store_dwordx4 v[8:9], v[0:3], off
; GFX9-NEXT: s_addc_u32 s1, 0, 0
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: v_mov_b32_e32 v0, 16
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_store_dwordx4 v[0:1], v[4:7], off
; GFX9-NEXT: s_endpgm
;
@@ -2581,21 +2572,20 @@ define amdgpu_ps void @insertelement_s_v16i16_s_v(<16 x i16> addrspace(4)* inreg
; GFX8-NEXT: v_mov_b32_e32 v6, s22
; GFX8-NEXT: v_mov_b32_e32 v7, s23
; GFX8-NEXT: v_cmp_eq_u32_e64 s[12:13], 0, v8
; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, v9, s[0:1]
; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, v9, s[12:13]
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, v9, s[0:1]
; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, v9, s[2:3]
; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, v9, s[14:15]
; GFX8-NEXT: v_cndmask_b32_e64 v5, v5, v9, s[6:7]
; GFX8-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[8:9]
; GFX8-NEXT: v_cndmask_b32_e64 v7, v7, v9, s[10:11]
; GFX8-NEXT: v_mov_b32_e32 v8, 0
; GFX8-NEXT: s_add_u32 s0, 0, 16
; GFX8-NEXT: v_mov_b32_e32 v9, 0
; GFX8-NEXT: flat_store_dwordx4 v[8:9], v[0:3]
; GFX8-NEXT: s_addc_u32 s1, 0, 0
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: s_nop 0
; GFX8-NEXT: v_mov_b32_e32 v0, 16
; GFX8-NEXT: v_mov_b32_e32 v1, 0
; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
; GFX8-NEXT: s_endpgm
;
@@ -2651,12 +2641,13 @@ define amdgpu_ps void @insertelement_s_v16i16_s_v(<16 x i16> addrspace(4)* inreg
; GFX7-NEXT: s_mov_b64 s[0:1], 0
; GFX7-NEXT: s_mov_b32 s2, -1
; GFX7-NEXT: s_mov_b32 s3, 0xf000
; GFX7-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; GFX7-NEXT: v_cndmask_b32_e64 v4, v4, v9, s[14:15]
; GFX7-NEXT: v_cndmask_b32_e64 v5, v5, v9, s[6:7]
; GFX7-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[8:9]
; GFX7-NEXT: v_cndmask_b32_e64 v7, v7, v9, s[10:11]
; GFX7-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; GFX7-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
; GFX7-NEXT: s_mov_b64 s[0:1], 16
; GFX7-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0
; GFX7-NEXT: s_endpgm
%vec = load <16 x i16>, <16 x i16> addrspace(4)* %ptr
%insert = insertelement <16 x i16> %vec, i16 %val, i32 %idx
@@ -2708,21 +2699,20 @@ define amdgpu_ps void @insertelement_s_v16i16_v_v(<16 x i16> addrspace(4)* inreg
; GFX9-NEXT: v_mov_b32_e32 v6, s18
; GFX9-NEXT: v_mov_b32_e32 v7, s19
; GFX9-NEXT: v_cmp_eq_u32_e64 s[12:13], 0, v8
; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, v9, s[0:1]
; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, v9, s[12:13]
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, v9, s[0:1]
; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, v9, s[2:3]
; GFX9-NEXT: v_cndmask_b32_e64 v4, v4, v9, s[4:5]
; GFX9-NEXT: v_cndmask_b32_e64 v5, v5, v9, s[6:7]
; GFX9-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[8:9]
; GFX9-NEXT: v_cndmask_b32_e64 v7, v7, v9, s[10:11]
; GFX9-NEXT: v_mov_b32_e32 v8, 0
; GFX9-NEXT: s_add_u32 s0, 0, 16
; GFX9-NEXT: v_mov_b32_e32 v9, 0
; GFX9-NEXT: global_store_dwordx4 v[8:9], v[0:3], off
; GFX9-NEXT: s_addc_u32 s1, 0, 0
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: v_mov_b32_e32 v0, 16
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: global_store_dwordx4 v[0:1], v[4:7], off
; GFX9-NEXT: s_endpgm
;
@@ -2770,21 +2760,20 @@ define amdgpu_ps void @insertelement_s_v16i16_v_v(<16 x i16> addrspace(4)* inreg
; GFX8-NEXT: v_mov_b32_e32 v6, s18
; GFX8-NEXT: v_mov_b32_e32 v7, s19
; GFX8-NEXT: v_cmp_eq_u32_e64 s[12:13], 0, v8
; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, v9, s[0:1]
; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, v9, s[12:13]
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc
; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, v9, s[0:1]
; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, v9, s[2:3]
; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, v9, s[4:5]
; GFX8-NEXT: v_cndmask_b32_e64 v5, v5, v9, s[6:7]
; GFX8-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[8:9]
; GFX8-NEXT: v_cndmask_b32_e64 v7, v7, v9, s[10:11]
; GFX8-NEXT: v_mov_b32_e32 v8, 0
; GFX8-NEXT: s_add_u32 s0, 0, 16
; GFX8-NEXT: v_mov_b32_e32 v9, 0
; GFX8-NEXT: flat_store_dwordx4 v[8:9], v[0:3]
; GFX8-NEXT: s_addc_u32 s1, 0, 0
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: s_nop 0
; GFX8-NEXT: v_mov_b32_e32 v0, 16
; GFX8-NEXT: v_mov_b32_e32 v1, 0
; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
; GFX8-NEXT: s_endpgm
;
@@ -2840,12 +2829,13 @@ define amdgpu_ps void @insertelement_s_v16i16_v_v(<16 x i16> addrspace(4)* inreg
; GFX7-NEXT: s_mov_b64 s[0:1], 0
; GFX7-NEXT: s_mov_b32 s2, -1
; GFX7-NEXT: s_mov_b32 s3, 0xf000
; GFX7-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; GFX7-NEXT: v_cndmask_b32_e64 v4, v4, v9, s[4:5]
; GFX7-NEXT: v_cndmask_b32_e64 v5, v5, v9, s[6:7]
; GFX7-NEXT: v_cndmask_b32_e64 v6, v6, v9, s[8:9]
; GFX7-NEXT: v_cndmask_b32_e64 v7, v7, v9, s[10:11]
; GFX7-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; GFX7-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
; GFX7-NEXT: s_mov_b64 s[0:1], 16
; GFX7-NEXT: buffer_store_dwordx4 v[4:7], off, s[0:3], 0
; GFX7-NEXT: s_endpgm
%vec = load <16 x i16>, <16 x i16> addrspace(4)* %ptr
%insert = insertelement <16 x i16> %vec, i16 %val, i32 %idx
@@ -2884,20 +2874,18 @@ define amdgpu_ps void @insertelement_v_v16i16_s_v(<16 x i16> addrspace(1)* %ptr,
; GFX9-NEXT: v_cndmask_b32_e64 v11, v11, v9, s[8:9]
; GFX9-NEXT: v_cndmask_b32_e64 v11, v11, v10, s[10:11]
; GFX9-NEXT: v_and_or_b32 v11, v11, v1, v2
; GFX9-NEXT: v_cndmask_b32_e64 v2, v5, v11, s[0:1]
; GFX9-NEXT: s_add_u32 s0, 0, 16
; GFX9-NEXT: v_cndmask_b32_e64 v0, v3, v11, s[12:13]
; GFX9-NEXT: v_cndmask_b32_e64 v2, v5, v11, s[0:1]
; GFX9-NEXT: v_cndmask_b32_e64 v3, v6, v11, s[2:3]
; GFX9-NEXT: v_cndmask_b32_e64 v5, v8, v11, s[6:7]
; GFX9-NEXT: v_cndmask_b32_e64 v6, v9, v11, s[8:9]
; GFX9-NEXT: v_mov_b32_e32 v8, 0
; GFX9-NEXT: s_addc_u32 s1, 0, 0
; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v11, vcc
; GFX9-NEXT: v_cndmask_b32_e64 v4, v7, v11, s[4:5]
; GFX9-NEXT: v_cndmask_b32_e64 v7, v10, v11, s[10:11]
; GFX9-NEXT: v_mov_b32_e32 v11, s1
; GFX9-NEXT: v_mov_b32_e32 v10, 16
; GFX9-NEXT: v_mov_b32_e32 v9, 0
; GFX9-NEXT: v_mov_b32_e32 v10, s0
; GFX9-NEXT: v_mov_b32_e32 v11, 0
; GFX9-NEXT: global_store_dwordx4 v[8:9], v[0:3], off
; GFX9-NEXT: global_store_dwordx4 v[10:11], v[4:7], off
; GFX9-NEXT: s_endpgm
@@ -2935,20 +2923,18 @@ define amdgpu_ps void @insertelement_v_v16i16_s_v(<16 x i16> addrspace(1)* %ptr,
; GFX8-NEXT: v_cndmask_b32_e64 v11, v11, v10, s[10:11]
; GFX8-NEXT: v_and_b32_e32 v1, v11, v1
; GFX8-NEXT: v_or_b32_e32 v11, v1, v2
; GFX8-NEXT: v_cndmask_b32_e64 v2, v5, v11, s[0:1]
; GFX8-NEXT: s_add_u32 s0, 0, 16
; GFX8-NEXT: v_cndmask_b32_e64 v0, v3, v11, s[12:13]
; GFX8-NEXT: v_cndmask_b32_e64 v2, v5, v11, s[0:1]
; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v11, s[2:3]
; GFX8-NEXT: v_cndmask_b32_e64 v5, v8, v11, s[6:7]
; GFX8-NEXT: v_cndmask_b32_e64 v6, v9, v11, s[8:9]
; GFX8-NEXT: v_mov_b32_e32 v8, 0
; GFX8-NEXT: s_addc_u32 s1, 0, 0
; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v11, vcc
; GFX8-NEXT: v_cndmask_b32_e64 v4, v7, v11, s[4:5]
; GFX8-NEXT: v_cndmask_b32_e64 v7, v10, v11, s[10:11]
; GFX8-NEXT: v_mov_b32_e32 v11, s1
; GFX8-NEXT: v_mov_b32_e32 v10, 16
; GFX8-NEXT: v_mov_b32_e32 v9, 0
; GFX8-NEXT: v_mov_b32_e32 v10, s0
; GFX8-NEXT: v_mov_b32_e32 v11, 0
; GFX8-NEXT: flat_store_dwordx4 v[8:9], v[0:3]
; GFX8-NEXT: flat_store_dwordx4 v[10:11], v[4:7]
; GFX8-NEXT: s_endpgm
@@ -2993,11 +2979,12 @@ define amdgpu_ps void @insertelement_v_v16i16_s_v(<16 x i16> addrspace(1)* %ptr,
; GFX7-NEXT: v_cndmask_b32_e64 v2, v5, v11, s[0:1]
; GFX7-NEXT: v_cndmask_b32_e64 v3, v6, v11, s[2:3]
; GFX7-NEXT: v_cndmask_b32_e64 v4, v7, v11, s[4:5]
; GFX7-NEXT: buffer_store_dwordx4 v[0:3], off, s[16:19], 0
; GFX7-NEXT: v_cndmask_b32_e64 v5, v8, v11, s[6:7]
; GFX7-NEXT: v_cndmask_b32_e64 v6, v9, v11, s[8:9]
; GFX7-NEXT: v_cndmask_b32_e64 v7, v10, v11, s[10:11]
; GFX7-NEXT: buffer_store_dwordx4 v[0:3], off, s[16:19], 0
; GFX7-NEXT: buffer_store_dwordx4 v[4:7], off, s[16:19], 0 offset:16
; GFX7-NEXT: s_mov_b64 s[16:17], 16
; GFX7-NEXT: buffer_store_dwordx4 v[4:7], off, s[16:19], 0
; GFX7-NEXT: s_endpgm
%vec = load <16 x i16>, <16 x i16> addrspace(1)* %ptr
%insert = insertelement <16 x i16> %vec, i16 %val, i32 %idx
@@ -3034,21 +3021,19 @@ define amdgpu_ps void @insertelement_v_v16i16_v_s(<16 x i16> addrspace(1)* %ptr,
; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v9, s[8:9]
; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v10, s[10:11]
; GFX9-NEXT: v_and_or_b32 v11, v1, s13, v0
; GFX9-NEXT: v_cndmask_b32_e64 v2, v5, v11, s[0:1]
; GFX9-NEXT: s_add_u32 s0, 0, 16
; GFX9-NEXT: v_cmp_eq_u32_e64 s[12:13], s12, 0
; GFX9-NEXT: v_cndmask_b32_e64 v0, v3, v11, s[12:13]
; GFX9-NEXT: v_cndmask_b32_e64 v2, v5, v11, s[0:1]
; GFX9-NEXT: v_cndmask_b32_e64 v3, v6, v11, s[2:3]
; GFX9-NEXT: v_cndmask_b32_e64 v5, v8, v11, s[6:7]
; GFX9-NEXT: v_cndmask_b32_e64 v6, v9, v11, s[8:9]
; GFX9-NEXT: v_mov_b32_e32 v8, 0
; GFX9-NEXT: s_addc_u32 s1, 0, 0
; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v11, vcc
; GFX9-NEXT: v_cndmask_b32_e64 v4, v7, v11, s[4:5]
; GFX9-NEXT: v_cndmask_b32_e64 v7, v10, v11, s[10:11]
; GFX9-NEXT: v_mov_b32_e32 v11, s1
; GFX9-NEXT: v_mov_b32_e32 v10, 16
; GFX9-NEXT: v_mov_b32_e32 v9, 0
; GFX9-NEXT: v_mov_b32_e32 v10, s0
; GFX9-NEXT: v_mov_b32_e32 v11, 0
; GFX9-NEXT: global_store_dwordx4 v[8:9], v[0:3], off
; GFX9-NEXT: global_store_dwordx4 v[10:11], v[4:7], off
; GFX9-NEXT: s_endpgm
@@ -3085,21 +3070,19 @@ define amdgpu_ps void @insertelement_v_v16i16_v_s(<16 x i16> addrspace(1)* %ptr,
; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v10, s[10:11]
; GFX8-NEXT: v_and_b32_e32 v1, s13, v1
; GFX8-NEXT: v_or_b32_e32 v11, v1, v0
; GFX8-NEXT: v_cndmask_b32_e64 v2, v5, v11, s[0:1]
; GFX8-NEXT: s_add_u32 s0, 0, 16
; GFX8-NEXT: v_cmp_eq_u32_e64 s[12:13], s12, 0
; GFX8-NEXT: v_cndmask_b32_e64 v0, v3, v11, s[12:13]
; GFX8-NEXT: v_cndmask_b32_e64 v2, v5, v11, s[0:1]
; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v11, s[2:3]
; GFX8-NEXT: v_cndmask_b32_e64 v5, v8, v11, s[6:7]
; GFX8-NEXT: v_cndmask_b32_e64 v6, v9, v11, s[8:9]
; GFX8-NEXT: v_mov_b32_e32 v8, 0
; GFX8-NEXT: s_addc_u32 s1, 0, 0
; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v11, vcc
; GFX8-NEXT: v_cndmask_b32_e64 v4, v7, v11, s[4:5]
; GFX8-NEXT: v_cndmask_b32_e64 v7, v10, v11, s[10:11]
; GFX8-NEXT: v_mov_b32_e32 v11, s1
; GFX8-NEXT: v_mov_b32_e32 v10, 16
; GFX8-NEXT: v_mov_b32_e32 v9, 0
; GFX8-NEXT: v_mov_b32_e32 v10, s0
; GFX8-NEXT: v_mov_b32_e32 v11, 0
; GFX8-NEXT: flat_store_dwordx4 v[8:9], v[0:3]
; GFX8-NEXT: flat_store_dwordx4 v[10:11], v[4:7]
; GFX8-NEXT: s_endpgm
@@ -3144,11 +3127,12 @@ define amdgpu_ps void @insertelement_v_v16i16_v_s(<16 x i16> addrspace(1)* %ptr,
; GFX7-NEXT: v_cndmask_b32_e64 v2, v5, v11, s[0:1]
; GFX7-NEXT: v_cndmask_b32_e64 v3, v6, v11, s[2:3]
; GFX7-NEXT: v_cndmask_b32_e64 v4, v7, v11, s[4:5]
; GFX7-NEXT: buffer_store_dwordx4 v[0:3], off, s[16:19], 0
; GFX7-NEXT: v_cndmask_b32_e64 v5, v8, v11, s[6:7]
; GFX7-NEXT: v_cndmask_b32_e64 v6, v9, v11, s[8:9]
; GFX7-NEXT: v_cndmask_b32_e64 v7, v10, v11, s[10:11]
; GFX7-NEXT: buffer_store_dwordx4 v[0:3], off, s[16:19], 0
; GFX7-NEXT: buffer_store_dwordx4 v[4:7], off, s[16:19], 0 offset:16
; GFX7-NEXT: s_mov_b64 s[16:17], 16
; GFX7-NEXT: buffer_store_dwordx4 v[4:7], off, s[16:19], 0
; GFX7-NEXT: s_endpgm
%vec = load <16 x i16>, <16 x i16> addrspace(1)* %ptr
%insert = insertelement <16 x i16> %vec, i16 %val, i32 %idx
@@ -3186,20 +3170,18 @@ define amdgpu_ps void @insertelement_v_v16i16_v_v(<16 x i16> addrspace(1)* %ptr,
; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, v10, s[8:9]
; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, v11, s[10:11]
; GFX9-NEXT: v_and_or_b32 v12, v3, v1, v2
; GFX9-NEXT: v_cndmask_b32_e64 v2, v6, v12, s[0:1]
; GFX9-NEXT: s_add_u32 s0, 0, 16
; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, v12, s[12:13]
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v12, vcc
; GFX9-NEXT: v_cndmask_b32_e64 v4, v8, v12, s[4:5]
; GFX9-NEXT: v_cndmask_b32_e64 v5, v9, v12, s[6:7]
; GFX9-NEXT: v_mov_b32_e32 v8, 0
; GFX9-NEXT: s_addc_u32 s1, 0, 0
; GFX9-NEXT: v_cndmask_b32_e64 v2, v6, v12, s[0:1]
; GFX9-NEXT: v_cndmask_b32_e64 v3, v7, v12, s[2:3]
; GFX9-NEXT: v_cndmask_b32_e64 v6, v10, v12, s[8:9]
; GFX9-NEXT: v_cndmask_b32_e64 v7, v11, v12, s[10:11]
; GFX9-NEXT: v_mov_b32_e32 v11, s1
; GFX9-NEXT: v_mov_b32_e32 v10, 16
; GFX9-NEXT: v_mov_b32_e32 v9, 0
; GFX9-NEXT: v_mov_b32_e32 v10, s0
; GFX9-NEXT: v_mov_b32_e32 v11, 0
; GFX9-NEXT: global_store_dwordx4 v[8:9], v[0:3], off
; GFX9-NEXT: global_store_dwordx4 v[10:11], v[4:7], off
; GFX9-NEXT: s_endpgm
@@ -3236,20 +3218,18 @@ define amdgpu_ps void @insertelement_v_v16i16_v_v(<16 x i16> addrspace(1)* %ptr,
; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, v11, s[10:11]
; GFX8-NEXT: v_and_b32_e32 v1, v3, v1
; GFX8-NEXT: v_or_b32_e32 v12, v1, v2
; GFX8-NEXT: v_cndmask_b32_e64 v2, v6, v12, s[0:1]
; GFX8-NEXT: s_add_u32 s0, 0, 16
; GFX8-NEXT: v_cndmask_b32_e64 v0, v4, v12, s[12:13]
; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v12, vcc
; GFX8-NEXT: v_cndmask_b32_e64 v4, v8, v12, s[4:5]
; GFX8-NEXT: v_cndmask_b32_e64 v5, v9, v12, s[6:7]
; GFX8-NEXT: v_mov_b32_e32 v8, 0
; GFX8-NEXT: s_addc_u32 s1, 0, 0
; GFX8-NEXT: v_cndmask_b32_e64 v2, v6, v12, s[0:1]
; GFX8-NEXT: v_cndmask_b32_e64 v3, v7, v12, s[2:3]
; GFX8-NEXT: v_cndmask_b32_e64 v6, v10, v12, s[8:9]
; GFX8-NEXT: v_cndmask_b32_e64 v7, v11, v12, s[10:11]
; GFX8-NEXT: v_mov_b32_e32 v11, s1
; GFX8-NEXT: v_mov_b32_e32 v10, 16
; GFX8-NEXT: v_mov_b32_e32 v9, 0
; GFX8-NEXT: v_mov_b32_e32 v10, s0
; GFX8-NEXT: v_mov_b32_e32 v11, 0
; GFX8-NEXT: flat_store_dwordx4 v[8:9], v[0:3]
; GFX8-NEXT: flat_store_dwordx4 v[10:11], v[4:7]
; GFX8-NEXT: s_endpgm
@@ -3293,12 +3273,13 @@ define amdgpu_ps void @insertelement_v_v16i16_v_v(<16 x i16> addrspace(1)* %ptr,
; GFX7-NEXT: v_cndmask_b32_e32 v1, v5, v12, vcc
; GFX7-NEXT: v_cndmask_b32_e64 v2, v6, v12, s[0:1]
; GFX7-NEXT: v_cndmask_b32_e64 v3, v7, v12, s[2:3]
; GFX7-NEXT: buffer_store_dwordx4 v[0:3], off, s[16:19], 0
; GFX7-NEXT: v_cndmask_b32_e64 v4, v8, v12, s[4:5]
; GFX7-NEXT: v_cndmask_b32_e64 v5, v9, v12, s[6:7]
; GFX7-NEXT: v_cndmask_b32_e64 v6, v10, v12, s[8:9]
; GFX7-NEXT: v_cndmask_b32_e64 v7, v11, v12, s[10:11]
; GFX7-NEXT: buffer_store_dwordx4 v[0:3], off, s[16:19], 0
; GFX7-NEXT: buffer_store_dwordx4 v[4:7], off, s[16:19], 0 offset:16
; GFX7-NEXT: s_mov_b64 s[16:17], 16
; GFX7-NEXT: buffer_store_dwordx4 v[4:7], off, s[16:19], 0
; GFX7-NEXT: s_endpgm
%vec = load <16 x i16>, <16 x i16> addrspace(1)* %ptr
%insert = insertelement <16 x i16> %vec, i16 %val, i32 %idx


@@ -1124,7 +1124,6 @@ define amdgpu_kernel void @atomic_dec_shl_base_lds_0(i32 addrspace(1)* %out, i32
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; CI-NEXT: v_add_i32_e32 v2, vcc, 2, v0
; CI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; CI-NEXT: v_add_i32_e32 v0, vcc, 0, v0
; CI-NEXT: v_mov_b32_e32 v1, 9
; CI-NEXT: s_mov_b32 m0, -1
; CI-NEXT: ds_dec_rtn_u32 v3, v0, v1 offset:8
@@ -1142,7 +1141,6 @@ define amdgpu_kernel void @atomic_dec_shl_base_lds_0(i32 addrspace(1)* %out, i32
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: v_add_u32_e32 v2, vcc, 2, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 0, v0
; VI-NEXT: v_mov_b32_e32 v1, 9
; VI-NEXT: s_mov_b32 m0, -1
; VI-NEXT: ds_dec_rtn_u32 v3, v0, v1 offset:8
@@ -1684,8 +1682,7 @@ define amdgpu_kernel void @atomic_dec_shl_base_lds_0_i64(i64 addrspace(1)* %out,
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; CI-NEXT: v_add_i32_e32 v4, vcc, 2, v0
; CI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; CI-NEXT: v_add_i32_e32 v2, vcc, 0, v0
; CI-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; CI-NEXT: v_mov_b32_e32 v0, 9
; CI-NEXT: v_mov_b32_e32 v1, 0
; CI-NEXT: s_mov_b32 m0, -1
@@ -1703,8 +1700,7 @@ define amdgpu_kernel void @atomic_dec_shl_base_lds_0_i64(i64 addrspace(1)* %out,
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: v_add_u32_e32 v4, vcc, 2, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; VI-NEXT: v_add_u32_e32 v2, vcc, 0, v0
; VI-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; VI-NEXT: v_mov_b32_e32 v0, 9
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: s_mov_b32 m0, -1


@@ -477,7 +477,6 @@ define amdgpu_kernel void @atomic_inc_shl_base_lds_0_i32(i32 addrspace(1)* %out,
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; CI-NEXT: v_add_i32_e32 v2, vcc, 2, v0
; CI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; CI-NEXT: v_add_i32_e32 v0, vcc, 0, v0
; CI-NEXT: v_mov_b32_e32 v1, 9
; CI-NEXT: s_mov_b32 m0, -1
; CI-NEXT: ds_inc_rtn_u32 v3, v0, v1 offset:8
@@ -495,7 +494,6 @@ define amdgpu_kernel void @atomic_inc_shl_base_lds_0_i32(i32 addrspace(1)* %out,
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: v_add_u32_e32 v2, vcc, 2, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 0, v0
; VI-NEXT: v_mov_b32_e32 v1, 9
; VI-NEXT: s_mov_b32 m0, -1
; VI-NEXT: ds_inc_rtn_u32 v3, v0, v1 offset:8
@@ -513,7 +511,6 @@ define amdgpu_kernel void @atomic_inc_shl_base_lds_0_i32(i32 addrspace(1)* %out,
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX9-NEXT: v_add_u32_e32 v2, 2, v0
; GFX9-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX9-NEXT: v_add_u32_e32 v0, 0, v0
; GFX9-NEXT: v_mov_b32_e32 v1, 9
; GFX9-NEXT: ds_inc_rtn_u32 v3, v0, v1 offset:8
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
@@ -1216,8 +1213,7 @@ define amdgpu_kernel void @atomic_inc_shl_base_lds_0_i64(i64 addrspace(1)* %out,
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; CI-NEXT: v_add_i32_e32 v4, vcc, 2, v0
; CI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; CI-NEXT: v_add_i32_e32 v2, vcc, 0, v0
; CI-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; CI-NEXT: v_mov_b32_e32 v0, 9
; CI-NEXT: v_mov_b32_e32 v1, 0
; CI-NEXT: s_mov_b32 m0, -1
@@ -1235,8 +1231,7 @@ define amdgpu_kernel void @atomic_inc_shl_base_lds_0_i64(i64 addrspace(1)* %out,
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: v_add_u32_e32 v4, vcc, 2, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; VI-NEXT: v_add_u32_e32 v2, vcc, 0, v0
; VI-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; VI-NEXT: v_mov_b32_e32 v0, 9
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: s_mov_b32 m0, -1
@@ -1254,8 +1249,7 @@ define amdgpu_kernel void @atomic_inc_shl_base_lds_0_i64(i64 addrspace(1)* %out,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX9-NEXT: v_add_u32_e32 v4, 2, v0
; GFX9-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX9-NEXT: v_add_u32_e32 v2, 0, v0
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; GFX9-NEXT: v_mov_b32_e32 v0, 9
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: ds_inc_rtn_u64 v[0:1], v2, v[0:1] offset:16