AMDGPU/GlobalISel: Fix legalization failure for s65 shifts
This was trying to clamp s65 down to s32, which wasn't handled, so we need to promote all the way to s128 first. Having to order the legalization rules in just the right way is rather dissatisfying, but I'm not sure how smart the legalizer should be in trying to interpret the rules.
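The two hunks below make the same one-line reordering in both shift paths. As a condensed sketch (only the reordered lines are shown; the surrounding builder calls are unchanged), the 16-bit-capable path now reads:

    Shifts.clampScalar(1, S32, S32);      // shift amount stays 32-bit
    Shifts.widenScalarToNextPow2(0, 16);  // an s65 shifted value is widened to s128 first
    Shifts.clampScalar(0, S16, S64);      // the clamp now runs after the widen, not before

The other path does the same with widenScalarToNextPow2(0, 32) and clampScalar(0, S32, S64). With the old order, the clamp saw s65 first and tried a narrowing that the shift expansion does not handle; widening to the next power of two first routes s65 through the existing s128 lowering.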
commit c3a74183a5
parent 95bf5ac8a8
@@ -1345,8 +1345,8 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
       }, changeTo(1, S16));
     Shifts.maxScalarIf(typeIs(0, S16), 1, S16);
     Shifts.clampScalar(1, S32, S32);
-    Shifts.clampScalar(0, S16, S64);
     Shifts.widenScalarToNextPow2(0, 16);
+    Shifts.clampScalar(0, S16, S64);
 
     getActionDefinitionsBuilder({G_SSHLSAT, G_USHLSAT})
       .minScalar(0, S16)
@@ -1357,8 +1357,8 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
     // expansion for the shifted type will produce much worse code if it hasn't
     // been truncated already.
     Shifts.clampScalar(1, S32, S32);
-    Shifts.clampScalar(0, S32, S64);
     Shifts.widenScalarToNextPow2(0, 32);
+    Shifts.clampScalar(0, S32, S64);
 
     getActionDefinitionsBuilder({G_SSHLSAT, G_USHLSAT})
       .minScalar(0, S32)
@@ -1659,3 +1659,240 @@ define amdgpu_ps <2 x i64> @s_ashr_v2i64(<2 x i64> inreg %value, <2 x i64> inreg
 %result = ashr <2 x i64> %value, %amount
 ret <2 x i64> %result
 }
+
+define i65 @v_ashr_i65(i65 %value, i65 %amount) {
+; GFX6-LABEL: v_ashr_i65:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_bfe_i32 v4, v2, 0, 1
+; GFX6-NEXT: v_ashrrev_i32_e32 v5, 31, v4
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, 64, v3
+; GFX6-NEXT: v_lshr_b64 v[6:7], v[0:1], v3
+; GFX6-NEXT: v_lshl_b64 v[8:9], v[4:5], v8
+; GFX6-NEXT: v_subrev_i32_e32 v2, vcc, 64, v3
+; GFX6-NEXT: v_ashr_i64 v[10:11], v[4:5], v3
+; GFX6-NEXT: v_or_b32_e32 v6, v6, v8
+; GFX6-NEXT: v_ashrrev_i32_e32 v8, 31, v5
+; GFX6-NEXT: v_ashr_i64 v[4:5], v[4:5], v2
+; GFX6-NEXT: v_or_b32_e32 v7, v7, v9
+; GFX6-NEXT: v_cmp_gt_u32_e32 vcc, 64, v3
+; GFX6-NEXT: v_cndmask_b32_e32 v2, v4, v6, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc
+; GFX6-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v3
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v2, v0, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v4, v1, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e32 v2, v8, v10, vcc
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ashr_i65:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_bfe_i32 v4, v2, 0, 1
+; GFX8-NEXT: v_ashrrev_i32_e32 v5, 31, v4
+; GFX8-NEXT: v_sub_u32_e32 v8, vcc, 64, v3
+; GFX8-NEXT: v_lshrrev_b64 v[6:7], v3, v[0:1]
+; GFX8-NEXT: v_lshlrev_b64 v[8:9], v8, v[4:5]
+; GFX8-NEXT: v_subrev_u32_e32 v2, vcc, 64, v3
+; GFX8-NEXT: v_ashrrev_i64 v[10:11], v3, v[4:5]
+; GFX8-NEXT: v_or_b32_e32 v6, v6, v8
+; GFX8-NEXT: v_ashrrev_i32_e32 v8, 31, v5
+; GFX8-NEXT: v_ashrrev_i64 v[4:5], v2, v[4:5]
+; GFX8-NEXT: v_or_b32_e32 v7, v7, v9
+; GFX8-NEXT: v_cmp_gt_u32_e32 vcc, 64, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v3
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v2, v0, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v4, v1, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v8, v10, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ashr_i65:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_bfe_i32 v4, v2, 0, 1
+; GFX9-NEXT: v_ashrrev_i32_e32 v5, 31, v4
+; GFX9-NEXT: v_sub_u32_e32 v8, 64, v3
+; GFX9-NEXT: v_lshrrev_b64 v[6:7], v3, v[0:1]
+; GFX9-NEXT: v_lshlrev_b64 v[8:9], v8, v[4:5]
+; GFX9-NEXT: v_subrev_u32_e32 v2, 64, v3
+; GFX9-NEXT: v_ashrrev_i64 v[10:11], v3, v[4:5]
+; GFX9-NEXT: v_or_b32_e32 v6, v6, v8
+; GFX9-NEXT: v_ashrrev_i32_e32 v8, 31, v5
+; GFX9-NEXT: v_ashrrev_i64 v[4:5], v2, v[4:5]
+; GFX9-NEXT: v_or_b32_e32 v7, v7, v9
+; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, 64, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v3
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, v0, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v4, v1, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v8, v10, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_ashr_i65:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_bfe_i32 v4, v2, 0, 1
+; GFX10-NEXT: v_sub_nc_u32_e32 v2, 64, v3
+; GFX10-NEXT: v_subrev_nc_u32_e32 v10, 64, v3
+; GFX10-NEXT: v_lshrrev_b64 v[6:7], v3, v[0:1]
+; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 64, v3
+; GFX10-NEXT: v_ashrrev_i32_e32 v5, 31, v4
+; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 0, v3
+; GFX10-NEXT: v_lshlrev_b64 v[8:9], v2, v[4:5]
+; GFX10-NEXT: v_ashrrev_i64 v[10:11], v10, v[4:5]
+; GFX10-NEXT: v_or_b32_e32 v2, v6, v8
+; GFX10-NEXT: v_or_b32_e32 v8, v7, v9
+; GFX10-NEXT: v_ashrrev_i64 v[6:7], v3, v[4:5]
+; GFX10-NEXT: v_ashrrev_i32_e32 v3, 31, v5
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v11, v8, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, v0, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v4, v1, s4
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v3, v6, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+%result = ashr i65 %value, %amount
+ret i65 %result
+}
+
+define i65 @v_ashr_i65_33(i65 %value) {
+; GFX6-LABEL: v_ashr_i65_33:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_mov_b32_e32 v3, v1
+; GFX6-NEXT: v_bfe_i32 v1, v2, 0, 1
+; GFX6-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; GFX6-NEXT: v_lshl_b64 v[0:1], v[1:2], 31
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 1, v3
+; GFX6-NEXT: v_or_b32_e32 v0, v3, v0
+; GFX6-NEXT: v_ashrrev_i32_e32 v2, 1, v2
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ashr_i65_33:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: v_bfe_i32 v1, v2, 0, 1
+; GFX8-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; GFX8-NEXT: v_lshlrev_b64 v[0:1], 31, v[1:2]
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, 1, v3
+; GFX8-NEXT: v_or_b32_e32 v0, v3, v0
+; GFX8-NEXT: v_ashrrev_i32_e32 v2, 1, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ashr_i65_33:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: v_bfe_i32 v1, v2, 0, 1
+; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; GFX9-NEXT: v_lshlrev_b64 v[0:1], 31, v[1:2]
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 1, v3
+; GFX9-NEXT: v_or_b32_e32 v0, v3, v0
+; GFX9-NEXT: v_ashrrev_i32_e32 v2, 1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_ashr_i65_33:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_mov_b32_e32 v3, v1
+; GFX10-NEXT: v_bfe_i32 v1, v2, 0, 1
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, 1, v3
+; GFX10-NEXT: v_ashrrev_i32_e32 v2, 31, v1
+; GFX10-NEXT: v_lshlrev_b64 v[0:1], 31, v[1:2]
+; GFX10-NEXT: v_ashrrev_i32_e32 v2, 1, v2
+; GFX10-NEXT: v_or_b32_e32 v0, v3, v0
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+%result = ashr i65 %value, 33
+ret i65 %result
+}
+
+define amdgpu_ps i65 @s_ashr_i65(i65 inreg %value, i65 inreg %amount) {
+; GCN-LABEL: s_ashr_i65:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_bfe_i64 s[4:5], s[2:3], 0x10000
+; GCN-NEXT: s_sub_i32 s10, s3, 64
+; GCN-NEXT: s_sub_i32 s8, 64, s3
+; GCN-NEXT: s_cmp_lt_u32 s3, 64
+; GCN-NEXT: s_cselect_b32 s11, 1, 0
+; GCN-NEXT: s_cmp_eq_u32 s3, 0
+; GCN-NEXT: s_cselect_b32 s12, 1, 0
+; GCN-NEXT: s_ashr_i64 s[6:7], s[4:5], s3
+; GCN-NEXT: s_lshr_b64 s[2:3], s[0:1], s3
+; GCN-NEXT: s_lshl_b64 s[8:9], s[4:5], s8
+; GCN-NEXT: s_or_b64 s[2:3], s[2:3], s[8:9]
+; GCN-NEXT: s_ashr_i32 s8, s5, 31
+; GCN-NEXT: s_ashr_i64 s[4:5], s[4:5], s10
+; GCN-NEXT: s_cmp_lg_u32 s11, 0
+; GCN-NEXT: s_cselect_b64 s[2:3], s[2:3], s[4:5]
+; GCN-NEXT: s_cmp_lg_u32 s12, 0
+; GCN-NEXT: s_mov_b32 s9, s8
+; GCN-NEXT: s_cselect_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: s_cmp_lg_u32 s11, 0
+; GCN-NEXT: s_cselect_b64 s[2:3], s[6:7], s[8:9]
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_ashr_i65:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_bfe_i64 s[4:5], s[2:3], 0x10000
+; GFX10-NEXT: s_sub_i32 s12, s3, 64
+; GFX10-NEXT: s_sub_i32 s8, 64, s3
+; GFX10-NEXT: s_cmp_lt_u32 s3, 64
+; GFX10-NEXT: s_cselect_b32 s13, 1, 0
+; GFX10-NEXT: s_cmp_eq_u32 s3, 0
+; GFX10-NEXT: s_cselect_b32 s14, 1, 0
+; GFX10-NEXT: s_ashr_i64 s[6:7], s[4:5], s3
+; GFX10-NEXT: s_lshr_b64 s[2:3], s[0:1], s3
+; GFX10-NEXT: s_lshl_b64 s[8:9], s[4:5], s8
+; GFX10-NEXT: s_ashr_i32 s10, s5, 31
+; GFX10-NEXT: s_or_b64 s[2:3], s[2:3], s[8:9]
+; GFX10-NEXT: s_ashr_i64 s[4:5], s[4:5], s12
+; GFX10-NEXT: s_cmp_lg_u32 s13, 0
+; GFX10-NEXT: s_mov_b32 s11, s10
+; GFX10-NEXT: s_cselect_b64 s[2:3], s[2:3], s[4:5]
+; GFX10-NEXT: s_cmp_lg_u32 s14, 0
+; GFX10-NEXT: s_cselect_b64 s[0:1], s[0:1], s[2:3]
+; GFX10-NEXT: s_cmp_lg_u32 s13, 0
+; GFX10-NEXT: s_cselect_b64 s[2:3], s[6:7], s[10:11]
+; GFX10-NEXT: ; return to shader part epilog
+%result = ashr i65 %value, %amount
+ret i65 %result
+}
+
+define amdgpu_ps i65 @s_ashr_i65_33(i65 inreg %value) {
+; GCN-LABEL: s_ashr_i65_33:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x10000
+; GCN-NEXT: s_lshr_b32 s0, s1, 1
+; GCN-NEXT: s_mov_b32 s1, 0
+; GCN-NEXT: s_lshl_b64 s[4:5], s[2:3], 31
+; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
+; GCN-NEXT: s_ashr_i32 s2, s3, 1
+; GCN-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_ashr_i65_33:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x10000
+; GFX10-NEXT: s_lshr_b32 s0, s1, 1
+; GFX10-NEXT: s_mov_b32 s1, 0
+; GFX10-NEXT: s_lshl_b64 s[4:5], s[2:3], 31
+; GFX10-NEXT: s_ashr_i32 s2, s3, 1
+; GFX10-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
+; GFX10-NEXT: ; return to shader part epilog
+%result = ashr i65 %value, 33
+ret i65 %result
+}
+
+; FIXME: Argument lowering asserts
+; define <2 x i65> @v_ashr_v2i65(<2 x i65> %value, <2 x i65> %amount) {
+;   %result = ashr <2 x i65> %value, %amount
+;   ret <2 x i65> %result
+; }
+
+; define amdgpu_ps <2 x i65> @s_ashr_v2i65(<2 x i65> inreg %value, <2 x i65> inreg %amount) {
+;   %result = ashr <2 x i65> %value, %amount
+;   ret <2 x i65> %result
+; }
@@ -1723,3 +1723,240 @@ body: |
 %2:_(<2 x s128>) = G_ASHR %0, %1
 $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %2
 ...
+
+---
+name: test_ashr_s65_s32
+body: |
+bb.0:
+liveins: $vgpr0_vgpr1_vgpr2, $vgpr3
+
+; SI-LABEL: name: test_ashr_s65_s32
+; SI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+; SI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr3
+; SI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %24(s64)
+; SI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
+; SI-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+; SI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
+; SI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
+; SI-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[MV1]], 1
+; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+; SI-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+; SI-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+; SI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+; SI-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+; SI-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[TRUNC]](s32)
+; SI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV]], [[TRUNC]](s32)
+; SI-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SEXT_INREG]], [[SUB1]](s32)
+; SI-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+; SI-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[C2]](s32)
+; SI-NEXT: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[SUB]](s32)
+; SI-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[ASHR2]]
+; SI-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[MV]], [[SELECT]]
+; SI-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[ASHR]], [[ASHR1]]
+; SI-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+; SI-NEXT: [[TRUNC1:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
+; SI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC1]](s96)
+; VI-LABEL: name: test_ashr_s65_s32
+; VI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+; VI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr3
+; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %24(s64)
+; VI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
+; VI-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
+; VI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
+; VI-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[MV1]], 1
+; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+; VI-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+; VI-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+; VI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+; VI-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+; VI-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[TRUNC]](s32)
+; VI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV]], [[TRUNC]](s32)
+; VI-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SEXT_INREG]], [[SUB1]](s32)
+; VI-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+; VI-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[C2]](s32)
+; VI-NEXT: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[SUB]](s32)
+; VI-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[ASHR2]]
+; VI-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[MV]], [[SELECT]]
+; VI-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[ASHR]], [[ASHR1]]
+; VI-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
+; VI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC1]](s96)
+; GFX9PLUS-LABEL: name: test_ashr_s65_s32
+; GFX9PLUS: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+; GFX9PLUS-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr3
+; GFX9PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %24(s64)
+; GFX9PLUS-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
+; GFX9PLUS-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+; GFX9PLUS-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
+; GFX9PLUS-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
+; GFX9PLUS-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[MV1]], 1
+; GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+; GFX9PLUS-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+; GFX9PLUS-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+; GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+; GFX9PLUS-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+; GFX9PLUS-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+; GFX9PLUS-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[TRUNC]](s32)
+; GFX9PLUS-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV]], [[TRUNC]](s32)
+; GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SEXT_INREG]], [[SUB1]](s32)
+; GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+; GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+; GFX9PLUS-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[C2]](s32)
+; GFX9PLUS-NEXT: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[SUB]](s32)
+; GFX9PLUS-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[ASHR2]]
+; GFX9PLUS-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[MV]], [[SELECT]]
+; GFX9PLUS-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[ASHR]], [[ASHR1]]
+; GFX9PLUS-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+; GFX9PLUS-NEXT: [[TRUNC1:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
+; GFX9PLUS-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC1]](s96)
+%0:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+%1:_(s32) = COPY $vgpr3
+%2:_(s65) = G_TRUNC %0
+%3:_(s65) = G_ASHR %2, %3
+%4:_(s96) = G_ANYEXT %3
+$vgpr0_vgpr1_vgpr2 = COPY %4
+...
+
+---
+name: test_ashr_s65_s32_constant8
+body: |
+bb.0:
+liveins: $vgpr0_vgpr1_vgpr2
+
+; SI-LABEL: name: test_ashr_s65_s32_constant8
+; SI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+; SI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %24(s64)
+; SI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
+; SI-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+; SI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
+; SI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
+; SI-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[MV1]], 1
+; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+; SI-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+; SI-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+; SI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+; SI-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+; SI-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[TRUNC]](s32)
+; SI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV]], [[TRUNC]](s32)
+; SI-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SEXT_INREG]], [[SUB1]](s32)
+; SI-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+; SI-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[C2]](s32)
+; SI-NEXT: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[SUB]](s32)
+; SI-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[ASHR2]]
+; SI-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[MV]], [[SELECT]]
+; SI-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[ASHR]], [[ASHR1]]
+; SI-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+; SI-NEXT: [[TRUNC1:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
+; SI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC1]](s96)
+; VI-LABEL: name: test_ashr_s65_s32_constant8
+; VI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %24(s64)
+; VI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
+; VI-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
+; VI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
+; VI-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[MV1]], 1
+; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+; VI-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+; VI-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+; VI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+; VI-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+; VI-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[TRUNC]](s32)
+; VI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV]], [[TRUNC]](s32)
+; VI-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SEXT_INREG]], [[SUB1]](s32)
+; VI-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+; VI-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[C2]](s32)
+; VI-NEXT: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[SUB]](s32)
+; VI-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[ASHR2]]
+; VI-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[MV]], [[SELECT]]
+; VI-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[ASHR]], [[ASHR1]]
+; VI-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
+; VI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC1]](s96)
+; GFX9PLUS-LABEL: name: test_ashr_s65_s32_constant8
+; GFX9PLUS: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+; GFX9PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %24(s64)
+; GFX9PLUS-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
+; GFX9PLUS-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+; GFX9PLUS-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
+; GFX9PLUS-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
+; GFX9PLUS-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[MV1]], 1
+; GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+; GFX9PLUS-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+; GFX9PLUS-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+; GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+; GFX9PLUS-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+; GFX9PLUS-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+; GFX9PLUS-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[TRUNC]](s32)
+; GFX9PLUS-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV]], [[TRUNC]](s32)
+; GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SEXT_INREG]], [[SUB1]](s32)
+; GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+; GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+; GFX9PLUS-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[C2]](s32)
+; GFX9PLUS-NEXT: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[SUB]](s32)
+; GFX9PLUS-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[ASHR2]]
+; GFX9PLUS-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[MV]], [[SELECT]]
+; GFX9PLUS-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[ASHR]], [[ASHR1]]
+; GFX9PLUS-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+; GFX9PLUS-NEXT: [[TRUNC1:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
+; GFX9PLUS-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC1]](s96)
+%0:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+%1:_(s32) = G_CONSTANT i32 8
+%2:_(s65) = G_TRUNC %0
+%3:_(s65) = G_ASHR %2, %3
+%4:_(s96) = G_ANYEXT %3
+$vgpr0_vgpr1_vgpr2 = COPY %4
+...
+
+---
+name: test_ashr_s65_s32_known_pow2
+body: |
+bb.0:
+liveins: $vgpr0_vgpr1_vgpr2, $vgpr3
+
+; SI-LABEL: name: test_ashr_s65_s32_known_pow2
+; SI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+; SI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr3
+; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[C]], [[COPY1]](s32)
+; SI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
+; SI-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+; SI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
+; SI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
+; SI-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[MV1]], 1
+; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+; SI-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SHL]], [[C1]]
+; SI-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SHL]]
+; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+; SI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SHL]](s32), [[C1]]
+; SI-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SHL]](s32), [[C2]]
+; SI-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[SHL]](s32)
+; SI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV]], [[SHL]](s32)
+; SI-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[SEXT_INREG]], [[SUB1]](s32)
+; SI-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL1]]
+; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+; SI-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[C3]](s32)
+; SI-NEXT: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[SUB]](s32)
+; SI-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[ASHR2]]
+; SI-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[MV]], [[SELECT]]
+; SI-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[ASHR]], [[ASHR1]]
+; SI-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+; SI-NEXT: [[TRUNC:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
+; SI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC]](s96)
+; VI-LABEL: name: test_ashr_s65_s32_known_pow2
+; VI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+; VI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr3
+; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[C]], [[COPY1]](s32)
+; VI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
+; VI-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
+; VI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
+; VI-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[MV1]], 1
+; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+; VI-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SHL]], [[C1]]
+; VI-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SHL]]
+; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+; VI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SHL]](s32), [[C1]]
+; VI-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SHL]](s32), [[C2]]
+; VI-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[SHL]](s32)
+; VI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV]], [[SHL]](s32)
+; VI-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[SEXT_INREG]], [[SUB1]](s32)
+; VI-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL1]]
+; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+; VI-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[C3]](s32)
+; VI-NEXT: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[SUB]](s32)
+; VI-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[ASHR2]]
+; VI-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[MV]], [[SELECT]]
+; VI-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[ASHR]], [[ASHR1]]
+; VI-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
+; VI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC]](s96)
+; GFX9PLUS-LABEL: name: test_ashr_s65_s32_known_pow2
+; GFX9PLUS: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+; GFX9PLUS-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr3
+; GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+; GFX9PLUS-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[C]], [[COPY1]](s32)
+; GFX9PLUS-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
+; GFX9PLUS-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+; GFX9PLUS-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
+; GFX9PLUS-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
+; GFX9PLUS-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[MV1]], 1
+; GFX9PLUS-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+; GFX9PLUS-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SHL]], [[C1]]
+; GFX9PLUS-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SHL]]
+; GFX9PLUS-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+; GFX9PLUS-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SHL]](s32), [[C1]]
+; GFX9PLUS-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SHL]](s32), [[C2]]
+; GFX9PLUS-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[SHL]](s32)
+; GFX9PLUS-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV]], [[SHL]](s32)
+; GFX9PLUS-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[SEXT_INREG]], [[SUB1]](s32)
+; GFX9PLUS-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL1]]
+; GFX9PLUS-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+; GFX9PLUS-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[C3]](s32)
+; GFX9PLUS-NEXT: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG]], [[SUB]](s32)
+; GFX9PLUS-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[ASHR2]]
+; GFX9PLUS-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[MV]], [[SELECT]]
+; GFX9PLUS-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[ASHR]], [[ASHR1]]
+; GFX9PLUS-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+; GFX9PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
+; GFX9PLUS-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC]](s96)
+%0:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+%1:_(s32) = COPY $vgpr3
+%2:_(s32) = G_CONSTANT i32 1
+%3:_(s32) = G_SHL %2, %1
+%4:_(s65) = G_TRUNC %0
+%5:_(s65) = G_ASHR %4, %3
+%6:_(s96) = G_ANYEXT %5
+$vgpr0_vgpr1_vgpr2 = COPY %6
+...
@@ -1695,3 +1695,317 @@ body: |
 %2:_(<2 x s128>) = G_LSHR %0, %1
 $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %2
 ...
+
+---
+name: test_lshr_s65_s32
+body: |
+bb.0:
+liveins: $vgpr0_vgpr1_vgpr2, $vgpr3
+
+; SI-LABEL: name: test_lshr_s65_s32
+; SI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+; SI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr3
+; SI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %23(s64)
+; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+; SI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
+; SI-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+; SI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
+; SI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
+; SI-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[MV]], [[C]]
+; SI-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[MV1]], [[C1]]
+; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+; SI-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
+; SI-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
+; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+; SI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
+; SI-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C3]]
+; SI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[TRUNC]](s32)
+; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[TRUNC]](s32)
+; SI-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[AND1]], [[SUB1]](s32)
+; SI-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL]]
+; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[SUB]](s32)
+; SI-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[LSHR2]]
+; SI-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[AND]], [[SELECT]]
+; SI-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[LSHR]], [[C4]]
+; SI-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+; SI-NEXT: [[TRUNC1:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
+; SI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC1]](s96)
+; VI-LABEL: name: test_lshr_s65_s32
+; VI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+; VI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr3
+; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %23(s64)
+; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+; VI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
+; VI-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
+; VI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
+; VI-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[MV]], [[C]]
+; VI-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[MV1]], [[C1]]
+; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+; VI-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
+; VI-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
+; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+; VI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
+; VI-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C3]]
+; VI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[TRUNC]](s32)
+; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[TRUNC]](s32)
+; VI-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[AND1]], [[SUB1]](s32)
+; VI-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL]]
+; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[SUB]](s32)
+; VI-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[LSHR2]]
+; VI-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[AND]], [[SELECT]]
+; VI-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[LSHR]], [[C4]]
+; VI-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
+; VI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC1]](s96)
+; GFX9-LABEL: name: test_lshr_s65_s32
+; GFX9: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr3
+; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %23(s64)
+; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+; GFX9-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
+; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+; GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
+; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
+; GFX9-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[MV]], [[C]]
+; GFX9-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[MV1]], [[C1]]
+; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+; GFX9-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
+; GFX9-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
+; GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+; GFX9-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
+; GFX9-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C3]]
+; GFX9-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[TRUNC]](s32)
+; GFX9-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[TRUNC]](s32)
+; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[AND1]], [[SUB1]](s32)
+; GFX9-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL]]
+; GFX9-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+; GFX9-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[SUB]](s32)
+; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[LSHR2]]
+; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[AND]], [[SELECT]]
+; GFX9-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[LSHR]], [[C4]]
+; GFX9-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
+; GFX9-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC1]](s96)
+%0:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+%1:_(s32) = COPY $vgpr3
+%2:_(s65) = G_TRUNC %0
+%3:_(s65) = G_LSHR %2, %3
+%4:_(s96) = G_ANYEXT %3
+$vgpr0_vgpr1_vgpr2 = COPY %4
+...
+
+---
+name: test_lshr_s65_s32_constant8
+body: |
+bb.0:
+liveins: $vgpr0_vgpr1_vgpr2
+
+; SI-LABEL: name: test_lshr_s65_s32_constant8
+; SI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+; SI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %23(s64)
+; SI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+; SI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
+; SI-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+; SI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
+; SI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
+; SI-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[MV]], [[C]]
+; SI-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[MV1]], [[C1]]
+; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+; SI-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
+; SI-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
+; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+; SI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
+; SI-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C3]]
+; SI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[TRUNC]](s32)
+; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[TRUNC]](s32)
+; SI-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[AND1]], [[SUB1]](s32)
+; SI-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL]]
+; SI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[SUB]](s32)
+; SI-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[LSHR2]]
+; SI-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[AND]], [[SELECT]]
+; SI-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[LSHR]], [[C4]]
+; SI-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+; SI-NEXT: [[TRUNC1:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
+; SI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC1]](s96)
+; VI-LABEL: name: test_lshr_s65_s32_constant8
+; VI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %23(s64)
+; VI-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+; VI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
+; VI-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
+; VI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
+; VI-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[MV]], [[C]]
+; VI-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[MV1]], [[C1]]
+; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+; VI-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
+; VI-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
+; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+; VI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
+; VI-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C3]]
+; VI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[TRUNC]](s32)
+; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[TRUNC]](s32)
+; VI-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[AND1]], [[SUB1]](s32)
+; VI-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL]]
+; VI-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[SUB]](s32)
+; VI-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[LSHR2]]
+; VI-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[AND]], [[SELECT]]
+; VI-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[LSHR]], [[C4]]
+; VI-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
+; VI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC1]](s96)
+; GFX9-LABEL: name: test_lshr_s65_s32_constant8
+; GFX9: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %23(s64)
+; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+; GFX9-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
+; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+; GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
+; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
+; GFX9-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[MV]], [[C]]
+; GFX9-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[MV1]], [[C1]]
+; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+; GFX9-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
+; GFX9-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
+; GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+; GFX9-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
+; GFX9-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C3]]
+; GFX9-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[TRUNC]](s32)
+; GFX9-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[TRUNC]](s32)
+; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[AND1]], [[SUB1]](s32)
+; GFX9-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL]]
+; GFX9-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+; GFX9-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[SUB]](s32)
+; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[LSHR2]]
+; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[AND]], [[SELECT]]
+; GFX9-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[LSHR]], [[C4]]
+; GFX9-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
+; GFX9-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC1]](s96)
+%0:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+%1:_(s32) = G_CONSTANT i32 8
+%2:_(s65) = G_TRUNC %0
+%3:_(s65) = G_LSHR %2, %3
+%4:_(s96) = G_ANYEXT %3
+$vgpr0_vgpr1_vgpr2 = COPY %4
+...
+
+---
+name: test_lshr_s65_s32_known_pow2
+body: |
+bb.0:
+liveins: $vgpr0_vgpr1_vgpr2, $vgpr3
+
+; SI-LABEL: name: test_lshr_s65_s32_known_pow2
+; SI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+; SI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr3
+; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[C]], [[COPY1]](s32)
+; SI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+; SI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
+; SI-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+; SI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
+; SI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
+; SI-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[MV]], [[C1]]
+; SI-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[MV1]], [[C2]]
+; SI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+; SI-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SHL]], [[C3]]
+; SI-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[SHL]]
+; SI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+; SI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SHL]](s32), [[C3]]
+; SI-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SHL]](s32), [[C4]]
+; SI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[SHL]](s32)
+; SI-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[SHL]](s32)
+; SI-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[AND1]], [[SUB1]](s32)
+; SI-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL1]]
+; SI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+; SI-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[SUB]](s32)
+; SI-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[LSHR2]]
+; SI-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[AND]], [[SELECT]]
+; SI-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[LSHR]], [[C5]]
+; SI-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+; SI-NEXT: [[TRUNC:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
+; SI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC]](s96)
+; VI-LABEL: name: test_lshr_s65_s32_known_pow2
+; VI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+; VI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr3
+; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[C]], [[COPY1]](s32)
+; VI-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+; VI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
+; VI-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
+; VI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
+; VI-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[MV]], [[C1]]
+; VI-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[MV1]], [[C2]]
+; VI-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+; VI-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SHL]], [[C3]]
+; VI-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[SHL]]
+; VI-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+; VI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SHL]](s32), [[C3]]
+; VI-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SHL]](s32), [[C4]]
+; VI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[SHL]](s32)
+; VI-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[SHL]](s32)
+; VI-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[AND1]], [[SUB1]](s32)
+; VI-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL1]]
+; VI-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+; VI-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[SUB]](s32)
+; VI-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[LSHR2]]
+; VI-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[AND]], [[SELECT]]
+; VI-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[LSHR]], [[C5]]
+; VI-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
+; VI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC]](s96)
+; GFX9-LABEL: name: test_lshr_s65_s32_known_pow2
+; GFX9: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr3
+; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[C]], [[COPY1]](s32)
+; GFX9-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
+; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+; GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
+; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
+; GFX9-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[MV]], [[C1]]
+; GFX9-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[MV1]], [[C2]]
+; GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+; GFX9-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SHL]], [[C3]]
+; GFX9-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[SHL]]
+; GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+; GFX9-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SHL]](s32), [[C3]]
+; GFX9-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SHL]](s32), [[C4]]
+; GFX9-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[SHL]](s32)
+; GFX9-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND]], [[SHL]](s32)
+; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[AND1]], [[SUB1]](s32)
+; GFX9-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL1]]
+; GFX9-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+; GFX9-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[AND1]], [[SUB]](s32)
+; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[LSHR2]]
+; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[AND]], [[SELECT]]
+; GFX9-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[LSHR]], [[C5]]
+; GFX9-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
+; GFX9-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC]](s96)
+%0:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+%1:_(s32) = COPY $vgpr3
+%2:_(s32) = G_CONSTANT i32 1
+%3:_(s32) = G_SHL %2, %1
+%4:_(s65) = G_TRUNC %0
+%5:_(s65) = G_LSHR %4, %3
+%6:_(s96) = G_ANYEXT %5
+$vgpr0_vgpr1_vgpr2 = COPY %6
+...
@ -1626,3 +1626,281 @@ body: |
%2:_(<2 x s128>) = G_SHL %0, %1
$vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %2
...

---
name: test_shl_s65_s32
body: |
bb.0:
liveins: $vgpr0_vgpr1_vgpr2, $vgpr3

; SI-LABEL: name: test_shl_s65_s32
; SI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
; SI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr3
; SI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %22(s64)
; SI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
; SI-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; SI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
; SI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; SI-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
; SI-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; SI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
; SI-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
; SI-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[MV]], [[TRUNC]](s32)
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV]], [[SUB1]](s32)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[MV1]], [[TRUNC]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL1]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[MV]], [[SUB]](s32)
; SI-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[C2]]
; SI-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL2]]
; SI-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[MV1]], [[SELECT1]]
; SI-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
; SI-NEXT: [[TRUNC1:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
; SI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC1]](s96)
; VI-LABEL: name: test_shl_s65_s32
; VI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
; VI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr3
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %22(s64)
; VI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
; VI-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
; VI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; VI-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
; VI-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; VI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
; VI-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
; VI-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[MV]], [[TRUNC]](s32)
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV]], [[SUB1]](s32)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[MV1]], [[TRUNC]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL1]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[MV]], [[SUB]](s32)
; VI-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[C2]]
; VI-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL2]]
; VI-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[MV1]], [[SELECT1]]
; VI-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
; VI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC1]](s96)
; GFX9-LABEL: name: test_shl_s65_s32
; GFX9: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %22(s64)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; GFX9-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
; GFX9-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX9-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
; GFX9-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[MV]], [[TRUNC]](s32)
; GFX9-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV]], [[SUB1]](s32)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[MV1]], [[TRUNC]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL1]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[MV]], [[SUB]](s32)
; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[C2]]
; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL2]]
; GFX9-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[MV1]], [[SELECT1]]
; GFX9-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC1]](s96)
%0:_(s96) = COPY $vgpr0_vgpr1_vgpr2
%1:_(s32) = COPY $vgpr3
%2:_(s65) = G_TRUNC %0
%3:_(s65) = G_SHL %2, %3
%4:_(s96) = G_ANYEXT %3
$vgpr0_vgpr1_vgpr2 = COPY %4
...

---
name: test_shl_s65_s32_constant8
body: |
bb.0:
liveins: $vgpr0_vgpr1_vgpr2

; SI-LABEL: name: test_shl_s65_s32_constant8
; SI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
; SI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %22(s64)
; SI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
; SI-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; SI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
; SI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; SI-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
; SI-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; SI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
; SI-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
; SI-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[MV]], [[TRUNC]](s32)
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV]], [[SUB1]](s32)
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[MV1]], [[TRUNC]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL1]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[MV]], [[SUB]](s32)
; SI-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[C2]]
; SI-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL2]]
; SI-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[MV1]], [[SELECT1]]
; SI-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
; SI-NEXT: [[TRUNC1:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
; SI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC1]](s96)
; VI-LABEL: name: test_shl_s65_s32_constant8
; VI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %22(s64)
; VI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
; VI-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
; VI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; VI-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
; VI-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; VI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
; VI-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
; VI-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[MV]], [[TRUNC]](s32)
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV]], [[SUB1]](s32)
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[MV1]], [[TRUNC]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL1]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[MV]], [[SUB]](s32)
; VI-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[C2]]
; VI-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL2]]
; VI-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[MV1]], [[SELECT1]]
; VI-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
; VI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC1]](s96)
; GFX9-LABEL: name: test_shl_s65_s32_constant8
; GFX9: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %22(s64)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; GFX9-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
; GFX9-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX9-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
; GFX9-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[MV]], [[TRUNC]](s32)
; GFX9-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV]], [[SUB1]](s32)
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[MV1]], [[TRUNC]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL1]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[MV]], [[SUB]](s32)
; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[C2]]
; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL2]]
; GFX9-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[MV1]], [[SELECT1]]
; GFX9-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC1]](s96)
%0:_(s96) = COPY $vgpr0_vgpr1_vgpr2
%1:_(s32) = G_CONSTANT i32 8
%2:_(s65) = G_TRUNC %0
%3:_(s65) = G_SHL %2, %3
%4:_(s96) = G_ANYEXT %3
$vgpr0_vgpr1_vgpr2 = COPY %4
...

---
name: test_shl_s65_s32_known_pow2
body: |
bb.0:
liveins: $vgpr0_vgpr1_vgpr2, $vgpr3

; SI-LABEL: name: test_shl_s65_s32_known_pow2
; SI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
; SI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr3
; SI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; SI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[C]], [[COPY1]](s32)
; SI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
; SI-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; SI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
; SI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
; SI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; SI-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SHL]], [[C1]]
; SI-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SHL]]
; SI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; SI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SHL]](s32), [[C1]]
; SI-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SHL]](s32), [[C2]]
; SI-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[MV]], [[SHL]](s32)
; SI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV]], [[SUB1]](s32)
; SI-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[MV1]], [[SHL]](s32)
; SI-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL2]]
; SI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; SI-NEXT: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[MV]], [[SUB]](s32)
; SI-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL1]], [[C3]]
; SI-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL3]]
; SI-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[MV1]], [[SELECT1]]
; SI-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
; SI-NEXT: [[TRUNC:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
; SI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC]](s96)
; VI-LABEL: name: test_shl_s65_s32_known_pow2
; VI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
; VI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr3
; VI-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; VI-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[C]], [[COPY1]](s32)
; VI-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
; VI-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; VI-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
; VI-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
; VI-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; VI-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SHL]], [[C1]]
; VI-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SHL]]
; VI-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; VI-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SHL]](s32), [[C1]]
; VI-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SHL]](s32), [[C2]]
; VI-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[MV]], [[SHL]](s32)
; VI-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV]], [[SUB1]](s32)
; VI-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[MV1]], [[SHL]](s32)
; VI-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL2]]
; VI-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; VI-NEXT: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[MV]], [[SUB]](s32)
; VI-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL1]], [[C3]]
; VI-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL3]]
; VI-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[MV1]], [[SELECT1]]
; VI-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
; VI-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC]](s96)
; GFX9-LABEL: name: test_shl_s65_s32_known_pow2
; GFX9: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr3
; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; GFX9-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[C]], [[COPY1]](s32)
; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[DEF]](s32)
; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; GFX9-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SHL]], [[C1]]
; GFX9-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SHL]]
; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX9-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SHL]](s32), [[C1]]
; GFX9-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SHL]](s32), [[C2]]
; GFX9-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[MV]], [[SHL]](s32)
; GFX9-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV]], [[SUB1]](s32)
; GFX9-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[MV1]], [[SHL]](s32)
; GFX9-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL2]]
; GFX9-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; GFX9-NEXT: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[MV]], [[SUB]](s32)
; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL1]], [[C3]]
; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL3]]
; GFX9-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[MV1]], [[SELECT1]]
; GFX9-NEXT: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s96) = G_TRUNC [[MV2]](s128)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[TRUNC]](s96)
%0:_(s96) = COPY $vgpr0_vgpr1_vgpr2
%1:_(s32) = COPY $vgpr3
%2:_(s32) = G_CONSTANT i32 1
%3:_(s32) = G_SHL %2, %1
%4:_(s65) = G_TRUNC %0
%5:_(s65) = G_SHL %4, %3
%6:_(s96) = G_ANYEXT %5
$vgpr0_vgpr1_vgpr2 = COPY %6
...

@ -1572,3 +1572,232 @@ define amdgpu_ps <2 x i64> @s_lshr_v2i64(<2 x i64> inreg %value, <2 x i64> inreg
%result = lshr <2 x i64> %value, %amount
ret <2 x i64> %result
}

define i65 @v_lshr_i65(i65 %value, i65 %amount) {
; GFX6-LABEL: v_lshr_i65:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_and_b32_e32 v4, 1, v2
; GFX6-NEXT: v_mov_b32_e32 v5, 0
; GFX6-NEXT: v_sub_i32_e32 v8, vcc, 64, v3
; GFX6-NEXT: v_subrev_i32_e32 v2, vcc, 64, v3
; GFX6-NEXT: v_lshr_b64 v[6:7], v[0:1], v3
; GFX6-NEXT: v_lshl_b64 v[8:9], v[4:5], v8
; GFX6-NEXT: v_lshr_b64 v[10:11], v[4:5], v3
; GFX6-NEXT: v_lshr_b64 v[4:5], v[4:5], v2
; GFX6-NEXT: v_or_b32_e32 v6, v6, v8
; GFX6-NEXT: v_or_b32_e32 v7, v7, v9
; GFX6-NEXT: v_cmp_gt_u32_e32 vcc, 64, v3
; GFX6-NEXT: v_cndmask_b32_e32 v2, v4, v6, vcc
; GFX6-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc
; GFX6-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v3
; GFX6-NEXT: v_cndmask_b32_e64 v0, v2, v0, s[4:5]
; GFX6-NEXT: v_cndmask_b32_e64 v1, v4, v1, s[4:5]
; GFX6-NEXT: v_cndmask_b32_e32 v2, 0, v10, vcc
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_lshr_i65:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_and_b32_e32 v4, 1, v2
; GFX8-NEXT: v_mov_b32_e32 v5, 0
; GFX8-NEXT: v_sub_u32_e32 v8, vcc, 64, v3
; GFX8-NEXT: v_subrev_u32_e32 v2, vcc, 64, v3
; GFX8-NEXT: v_lshrrev_b64 v[6:7], v3, v[0:1]
; GFX8-NEXT: v_lshlrev_b64 v[8:9], v8, v[4:5]
; GFX8-NEXT: v_lshrrev_b64 v[10:11], v3, v[4:5]
; GFX8-NEXT: v_lshrrev_b64 v[4:5], v2, v[4:5]
; GFX8-NEXT: v_or_b32_e32 v6, v6, v8
; GFX8-NEXT: v_or_b32_e32 v7, v7, v9
; GFX8-NEXT: v_cmp_gt_u32_e32 vcc, 64, v3
; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v6, vcc
; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc
; GFX8-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v3
; GFX8-NEXT: v_cndmask_b32_e64 v0, v2, v0, s[4:5]
; GFX8-NEXT: v_cndmask_b32_e64 v1, v4, v1, s[4:5]
; GFX8-NEXT: v_cndmask_b32_e32 v2, 0, v10, vcc
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_lshr_i65:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v4, 1, v2
; GFX9-NEXT: v_mov_b32_e32 v5, 0
; GFX9-NEXT: v_sub_u32_e32 v8, 64, v3
; GFX9-NEXT: v_subrev_u32_e32 v2, 64, v3
; GFX9-NEXT: v_lshrrev_b64 v[6:7], v3, v[0:1]
; GFX9-NEXT: v_lshlrev_b64 v[8:9], v8, v[4:5]
; GFX9-NEXT: v_lshrrev_b64 v[10:11], v3, v[4:5]
; GFX9-NEXT: v_lshrrev_b64 v[4:5], v2, v[4:5]
; GFX9-NEXT: v_or_b32_e32 v6, v6, v8
; GFX9-NEXT: v_or_b32_e32 v7, v7, v9
; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, 64, v3
; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v6, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc
; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v3
; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, v0, s[4:5]
; GFX9-NEXT: v_cndmask_b32_e64 v1, v4, v1, s[4:5]
; GFX9-NEXT: v_cndmask_b32_e32 v2, 0, v10, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_lshr_i65:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: v_and_b32_e32 v4, 1, v2
; GFX10-NEXT: v_mov_b32_e32 v5, 0
; GFX10-NEXT: v_sub_nc_u32_e32 v2, 64, v3
; GFX10-NEXT: v_subrev_nc_u32_e32 v10, 64, v3
; GFX10-NEXT: v_lshrrev_b64 v[6:7], v3, v[0:1]
; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 64, v3
; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 0, v3
; GFX10-NEXT: v_lshlrev_b64 v[8:9], v2, v[4:5]
; GFX10-NEXT: v_lshrrev_b64 v[10:11], v10, v[4:5]
; GFX10-NEXT: v_lshrrev_b64 v[4:5], v3, v[4:5]
; GFX10-NEXT: v_or_b32_e32 v2, v6, v8
; GFX10-NEXT: v_or_b32_e32 v6, v7, v9
; GFX10-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v5, v11, v6, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, v0, s4
; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, v1, s4
; GFX10-NEXT: v_cndmask_b32_e32 v2, 0, v4, vcc_lo
; GFX10-NEXT: s_setpc_b64 s[30:31]
%result = lshr i65 %value, %amount
ret i65 %result
}
|
||||||
|
|
||||||
|
define i65 @v_lshr_i65_33(i65 %value) {
|
||||||
|
; GFX6-LABEL: v_lshr_i65_33:
|
||||||
|
; GFX6: ; %bb.0:
|
||||||
|
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
||||||
|
; GFX6-NEXT: v_mov_b32_e32 v3, v1
|
||||||
|
; GFX6-NEXT: v_and_b32_e32 v0, 1, v2
|
||||||
|
; GFX6-NEXT: v_mov_b32_e32 v1, 0
|
||||||
|
; GFX6-NEXT: v_lshl_b64 v[0:1], v[0:1], 31
|
||||||
|
; GFX6-NEXT: v_lshrrev_b32_e32 v2, 1, v3
|
||||||
|
; GFX6-NEXT: v_or_b32_e32 v0, v2, v0
|
||||||
|
; GFX6-NEXT: v_mov_b32_e32 v2, 0
|
||||||
|
; GFX6-NEXT: s_setpc_b64 s[30:31]
|
||||||
|
;
|
||||||
|
; GFX8-LABEL: v_lshr_i65_33:
|
||||||
|
; GFX8: ; %bb.0:
|
||||||
|
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
||||||
|
; GFX8-NEXT: v_mov_b32_e32 v3, v1
|
||||||
|
; GFX8-NEXT: v_and_b32_e32 v0, 1, v2
|
||||||
|
; GFX8-NEXT: v_mov_b32_e32 v1, 0
|
||||||
|
; GFX8-NEXT: v_lshlrev_b64 v[0:1], 31, v[0:1]
|
||||||
|
; GFX8-NEXT: v_lshrrev_b32_e32 v2, 1, v3
|
||||||
|
; GFX8-NEXT: v_or_b32_e32 v0, v2, v0
|
||||||
|
; GFX8-NEXT: v_mov_b32_e32 v2, 0
|
||||||
|
; GFX8-NEXT: s_setpc_b64 s[30:31]
|
||||||
|
;
|
||||||
|
; GFX9-LABEL: v_lshr_i65_33:
|
||||||
|
; GFX9: ; %bb.0:
|
||||||
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
||||||
|
; GFX9-NEXT: v_mov_b32_e32 v3, v1
|
||||||
|
; GFX9-NEXT: v_and_b32_e32 v0, 1, v2
|
||||||
|
; GFX9-NEXT: v_mov_b32_e32 v1, 0
|
||||||
|
; GFX9-NEXT: v_lshlrev_b64 v[0:1], 31, v[0:1]
|
||||||
|
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 1, v3
|
||||||
|
; GFX9-NEXT: v_or_b32_e32 v0, v2, v0
|
||||||
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0
|
||||||
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
||||||
|
;
|
||||||
|
; GFX10-LABEL: v_lshr_i65_33:
|
||||||
|
; GFX10: ; %bb.0:
|
||||||
|
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
||||||
|
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
|
||||||
|
; GFX10-NEXT: v_mov_b32_e32 v3, v1
|
||||||
|
; GFX10-NEXT: v_mov_b32_e32 v1, 0
|
||||||
|
; GFX10-NEXT: v_and_b32_e32 v0, 1, v2
|
||||||
|
; GFX10-NEXT: v_lshrrev_b32_e32 v2, 1, v3
|
||||||
|
; GFX10-NEXT: v_lshlrev_b64 v[0:1], 31, v[0:1]
|
||||||
|
; GFX10-NEXT: v_or_b32_e32 v0, v2, v0
|
||||||
|
; GFX10-NEXT: v_mov_b32_e32 v2, 0
|
||||||
|
; GFX10-NEXT: s_setpc_b64 s[30:31]
|
||||||
|
%result = lshr i65 %value, 33
|
||||||
|
ret i65 %result
|
||||||
|
}

define amdgpu_ps i65 @s_lshr_i65(i65 inreg %value, i65 inreg %amount) {
; GCN-LABEL: s_lshr_i65:
; GCN: ; %bb.0:
; GCN-NEXT: s_and_b64 s[4:5], s[2:3], 1
; GCN-NEXT: s_sub_i32 s10, s3, 64
; GCN-NEXT: s_sub_i32 s8, 64, s3
; GCN-NEXT: s_cmp_lt_u32 s3, 64
; GCN-NEXT: s_cselect_b32 s11, 1, 0
; GCN-NEXT: s_cmp_eq_u32 s3, 0
; GCN-NEXT: s_cselect_b32 s12, 1, 0
; GCN-NEXT: s_lshr_b64 s[6:7], s[4:5], s3
; GCN-NEXT: s_lshr_b64 s[2:3], s[0:1], s3
; GCN-NEXT: s_lshl_b64 s[8:9], s[4:5], s8
; GCN-NEXT: s_or_b64 s[2:3], s[2:3], s[8:9]
; GCN-NEXT: s_lshr_b64 s[4:5], s[4:5], s10
; GCN-NEXT: s_cmp_lg_u32 s11, 0
; GCN-NEXT: s_cselect_b64 s[2:3], s[2:3], s[4:5]
; GCN-NEXT: s_cmp_lg_u32 s12, 0
; GCN-NEXT: s_cselect_b64 s[0:1], s[0:1], s[2:3]
; GCN-NEXT: s_cmp_lg_u32 s11, 0
; GCN-NEXT: s_cselect_b64 s[2:3], s[6:7], 0
; GCN-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_lshr_i65:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_and_b64 s[4:5], s[2:3], 1
; GFX10-NEXT: s_sub_i32 s10, s3, 64
; GFX10-NEXT: s_sub_i32 s2, 64, s3
; GFX10-NEXT: s_cmp_lt_u32 s3, 64
; GFX10-NEXT: s_cselect_b32 s11, 1, 0
; GFX10-NEXT: s_cmp_eq_u32 s3, 0
; GFX10-NEXT: s_cselect_b32 s12, 1, 0
; GFX10-NEXT: s_lshr_b64 s[6:7], s[0:1], s3
; GFX10-NEXT: s_lshl_b64 s[8:9], s[4:5], s2
; GFX10-NEXT: s_lshr_b64 s[2:3], s[4:5], s3
; GFX10-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
; GFX10-NEXT: s_lshr_b64 s[4:5], s[4:5], s10
; GFX10-NEXT: s_cmp_lg_u32 s11, 0
; GFX10-NEXT: s_cselect_b64 s[4:5], s[6:7], s[4:5]
; GFX10-NEXT: s_cmp_lg_u32 s12, 0
; GFX10-NEXT: s_cselect_b64 s[0:1], s[0:1], s[4:5]
; GFX10-NEXT: s_cmp_lg_u32 s11, 0
; GFX10-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
; GFX10-NEXT: ; return to shader part epilog
%result = lshr i65 %value, %amount
ret i65 %result
}

define amdgpu_ps i65 @s_lshr_i65_33(i65 inreg %value) {
; GCN-LABEL: s_lshr_i65_33:
; GCN: ; %bb.0:
; GCN-NEXT: s_and_b64 s[2:3], s[2:3], 1
; GCN-NEXT: s_lshr_b32 s0, s1, 1
; GCN-NEXT: s_mov_b32 s1, 0
; GCN-NEXT: s_lshl_b64 s[4:5], s[2:3], 31
; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
; GCN-NEXT: s_lshr_b32 s2, s3, 1
; GCN-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_lshr_i65_33:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_and_b64 s[2:3], s[2:3], 1
; GFX10-NEXT: s_lshr_b32 s0, s1, 1
; GFX10-NEXT: s_mov_b32 s1, 0
; GFX10-NEXT: s_lshl_b64 s[4:5], s[2:3], 31
; GFX10-NEXT: s_lshr_b32 s2, s3, 1
; GFX10-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
; GFX10-NEXT: ; return to shader part epilog
%result = lshr i65 %value, 33
ret i65 %result
}

; FIXME: Argument lowering asserts
; define <2 x i65> @v_lshr_v2i65(<2 x i65> %value, <2 x i65> %amount) {
; %result = lshr <2 x i65> %value, %amount
; ret <2 x i65> %result
; }

; define amdgpu_ps <2 x i65> @s_lshr_v2i65(<2 x i65> inreg %value, <2 x i65> inreg %amount) {
; %result = lshr <2 x i65> %value, %amount
; ret <2 x i65> %result
; }

@ -1582,3 +1582,201 @@ define amdgpu_ps <2 x i64> @s_shl_v2i64(<2 x i64> inreg %value, <2 x i64> inreg
%result = shl <2 x i64> %value, %amount
ret <2 x i64> %result
}
|
||||||
|
|
||||||
|
define i65 @v_shl_i65(i65 %value, i65 %amount) {
|
||||||
|
; GFX6-LABEL: v_shl_i65:
|
||||||
|
; GFX6: ; %bb.0:
|
||||||
|
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
||||||
|
; GFX6-NEXT: v_sub_i32_e32 v4, vcc, 64, v3
|
||||||
|
; GFX6-NEXT: v_lshr_b64 v[4:5], v[0:1], v4
|
||||||
|
; GFX6-NEXT: v_lshl_b64 v[5:6], v[2:3], v3
|
||||||
|
; GFX6-NEXT: v_subrev_i32_e32 v8, vcc, 64, v3
|
||||||
|
; GFX6-NEXT: v_lshl_b64 v[6:7], v[0:1], v3
|
||||||
|
; GFX6-NEXT: v_or_b32_e32 v9, v4, v5
|
||||||
|
; GFX6-NEXT: v_lshl_b64 v[4:5], v[0:1], v8
|
||||||
|
; GFX6-NEXT: v_cmp_gt_u32_e32 vcc, 64, v3
|
||||||
|
; GFX6-NEXT: v_cndmask_b32_e32 v0, 0, v6, vcc
|
||||||
|
; GFX6-NEXT: v_cndmask_b32_e32 v1, 0, v7, vcc
|
||||||
|
; GFX6-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc
|
||||||
|
; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
|
||||||
|
; GFX6-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
|
||||||
|
; GFX6-NEXT: s_setpc_b64 s[30:31]
|
||||||
|
;
|
||||||
|
; GFX8-LABEL: v_shl_i65:
|
||||||
|
; GFX8: ; %bb.0:
|
||||||
|
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
||||||
|
; GFX8-NEXT: v_sub_u32_e32 v4, vcc, 64, v3
|
||||||
|
; GFX8-NEXT: v_lshrrev_b64 v[4:5], v4, v[0:1]
|
||||||
|
; GFX8-NEXT: v_lshlrev_b64 v[5:6], v3, v[2:3]
|
||||||
|
; GFX8-NEXT: v_subrev_u32_e32 v8, vcc, 64, v3
|
||||||
|
; GFX8-NEXT: v_lshlrev_b64 v[6:7], v3, v[0:1]
|
||||||
|
; GFX8-NEXT: v_or_b32_e32 v9, v4, v5
|
||||||
|
; GFX8-NEXT: v_lshlrev_b64 v[4:5], v8, v[0:1]
|
||||||
|
; GFX8-NEXT: v_cmp_gt_u32_e32 vcc, 64, v3
|
||||||
|
; GFX8-NEXT: v_cndmask_b32_e32 v0, 0, v6, vcc
|
||||||
|
; GFX8-NEXT: v_cndmask_b32_e32 v1, 0, v7, vcc
|
||||||
|
; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc
|
||||||
|
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
|
||||||
|
; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
|
||||||
|
; GFX8-NEXT: s_setpc_b64 s[30:31]
|
||||||
|
;
|
||||||
|
; GFX9-LABEL: v_shl_i65:
|
||||||
|
; GFX9: ; %bb.0:
|
||||||
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
||||||
|
; GFX9-NEXT: v_sub_u32_e32 v4, 64, v3
|
||||||
|
; GFX9-NEXT: v_lshrrev_b64 v[4:5], v4, v[0:1]
|
||||||
|
; GFX9-NEXT: v_lshlrev_b64 v[5:6], v3, v[2:3]
|
||||||
|
; GFX9-NEXT: v_subrev_u32_e32 v8, 64, v3
|
||||||
|
; GFX9-NEXT: v_lshlrev_b64 v[6:7], v3, v[0:1]
|
||||||
|
; GFX9-NEXT: v_or_b32_e32 v9, v4, v5
|
||||||
|
; GFX9-NEXT: v_lshlrev_b64 v[4:5], v8, v[0:1]
|
||||||
|
; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, 64, v3
|
||||||
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, 0, v6, vcc
|
||||||
|
; GFX9-NEXT: v_cndmask_b32_e32 v1, 0, v7, vcc
|
||||||
|
; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc
|
||||||
|
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
|
||||||
|
; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
|
||||||
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
||||||
|
;
|
||||||
|
; GFX10-LABEL: v_shl_i65:
|
||||||
|
; GFX10: ; %bb.0:
|
||||||
|
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
||||||
|
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
|
||||||
|
; GFX10-NEXT: v_sub_nc_u32_e32 v6, 64, v3
|
||||||
|
; GFX10-NEXT: v_lshlrev_b64 v[4:5], v3, v[2:3]
|
||||||
|
; GFX10-NEXT: v_subrev_nc_u32_e32 v8, 64, v3
|
||||||
|
; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 64, v3
|
||||||
|
; GFX10-NEXT: v_lshrrev_b64 v[5:6], v6, v[0:1]
|
||||||
|
; GFX10-NEXT: v_lshlrev_b64 v[6:7], v3, v[0:1]
|
||||||
|
; GFX10-NEXT: v_lshlrev_b64 v[8:9], v8, v[0:1]
|
||||||
|
; GFX10-NEXT: v_or_b32_e32 v1, v5, v4
|
||||||
|
; GFX10-NEXT: v_cndmask_b32_e32 v0, 0, v6, vcc_lo
|
||||||
|
; GFX10-NEXT: v_cndmask_b32_e32 v4, v8, v1, vcc_lo
|
||||||
|
; GFX10-NEXT: v_cndmask_b32_e32 v1, 0, v7, vcc_lo
|
||||||
|
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v3
|
||||||
|
; GFX10-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc_lo
|
||||||
|
; GFX10-NEXT: s_setpc_b64 s[30:31]
|
||||||
|
%result = shl i65 %value, %amount
|
||||||
|
ret i65 %result
|
||||||
|
}

define i65 @v_shl_i65_33(i65 %value) {
; GFX6-LABEL: v_shl_i65_33:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_lshlrev_b32_e32 v4, 1, v0
; GFX6-NEXT: v_lshr_b64 v[2:3], v[0:1], 31
; GFX6-NEXT: v_mov_b32_e32 v0, 0
; GFX6-NEXT: v_mov_b32_e32 v1, v4
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_shl_i65_33:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_lshlrev_b32_e32 v4, 1, v0
; GFX8-NEXT: v_lshrrev_b64 v[2:3], 31, v[0:1]
; GFX8-NEXT: v_mov_b32_e32 v0, 0
; GFX8-NEXT: v_mov_b32_e32 v1, v4
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_shl_i65_33:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_lshlrev_b32_e32 v4, 1, v0
; GFX9-NEXT: v_lshrrev_b64 v[2:3], 31, v[0:1]
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: v_mov_b32_e32 v1, v4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_shl_i65_33:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: v_lshlrev_b32_e32 v4, 1, v0
; GFX10-NEXT: v_lshrrev_b64 v[2:3], 31, v[0:1]
; GFX10-NEXT: v_mov_b32_e32 v0, 0
; GFX10-NEXT: v_mov_b32_e32 v1, v4
; GFX10-NEXT: s_setpc_b64 s[30:31]
%result = shl i65 %value, 33
ret i65 %result
}
|
||||||
|
|
||||||
|
define amdgpu_ps i65 @s_shl_i65(i65 inreg %value, i65 inreg %amount) {
|
||||||
|
; GCN-LABEL: s_shl_i65:
|
||||||
|
; GCN: ; %bb.0:
|
||||||
|
; GCN-NEXT: s_sub_i32 s10, s3, 64
|
||||||
|
; GCN-NEXT: s_sub_i32 s6, 64, s3
|
||||||
|
; GCN-NEXT: s_cmp_lt_u32 s3, 64
|
||||||
|
; GCN-NEXT: s_cselect_b32 s11, 1, 0
|
||||||
|
; GCN-NEXT: s_cmp_eq_u32 s3, 0
|
||||||
|
; GCN-NEXT: s_cselect_b32 s12, 1, 0
|
||||||
|
; GCN-NEXT: s_lshr_b64 s[6:7], s[0:1], s6
|
||||||
|
; GCN-NEXT: s_lshl_b64 s[8:9], s[2:3], s3
|
||||||
|
; GCN-NEXT: s_lshl_b64 s[4:5], s[0:1], s3
|
||||||
|
; GCN-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
|
||||||
|
; GCN-NEXT: s_lshl_b64 s[8:9], s[0:1], s10
|
||||||
|
; GCN-NEXT: s_cmp_lg_u32 s11, 0
|
||||||
|
; GCN-NEXT: s_cselect_b64 s[0:1], s[4:5], 0
|
||||||
|
; GCN-NEXT: s_cselect_b64 s[4:5], s[6:7], s[8:9]
|
||||||
|
; GCN-NEXT: s_cmp_lg_u32 s12, 0
|
||||||
|
; GCN-NEXT: s_cselect_b64 s[2:3], s[2:3], s[4:5]
|
||||||
|
; GCN-NEXT: ; return to shader part epilog
|
||||||
|
;
|
||||||
|
; GFX10-LABEL: s_shl_i65:
|
||||||
|
; GFX10: ; %bb.0:
|
||||||
|
; GFX10-NEXT: s_sub_i32 s10, s3, 64
|
||||||
|
; GFX10-NEXT: s_sub_i32 s4, 64, s3
|
||||||
|
; GFX10-NEXT: s_cmp_lt_u32 s3, 64
|
||||||
|
; GFX10-NEXT: s_cselect_b32 s11, 1, 0
|
||||||
|
; GFX10-NEXT: s_cmp_eq_u32 s3, 0
|
||||||
|
; GFX10-NEXT: s_cselect_b32 s12, 1, 0
|
||||||
|
; GFX10-NEXT: s_lshr_b64 s[4:5], s[0:1], s4
|
||||||
|
; GFX10-NEXT: s_lshl_b64 s[6:7], s[2:3], s3
|
||||||
|
; GFX10-NEXT: s_lshl_b64 s[8:9], s[0:1], s3
|
||||||
|
; GFX10-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
|
||||||
|
; GFX10-NEXT: s_lshl_b64 s[6:7], s[0:1], s10
|
||||||
|
; GFX10-NEXT: s_cmp_lg_u32 s11, 0
|
||||||
|
; GFX10-NEXT: s_cselect_b64 s[0:1], s[8:9], 0
|
||||||
|
; GFX10-NEXT: s_cselect_b64 s[4:5], s[4:5], s[6:7]
|
||||||
|
; GFX10-NEXT: s_cmp_lg_u32 s12, 0
|
||||||
|
; GFX10-NEXT: s_cselect_b64 s[2:3], s[2:3], s[4:5]
|
||||||
|
; GFX10-NEXT: ; return to shader part epilog
|
||||||
|
%result = shl i65 %value, %amount
|
||||||
|
ret i65 %result
|
||||||
|
}

define amdgpu_ps i65 @s_shl_i65_33(i65 inreg %value) {
; GCN-LABEL: s_shl_i65_33:
; GCN: ; %bb.0:
; GCN-NEXT: s_lshl_b32 s4, s0, 1
; GCN-NEXT: s_mov_b32 s6, 0
; GCN-NEXT: s_lshl_b32 s7, s2, 1
; GCN-NEXT: s_lshr_b64 s[0:1], s[0:1], 31
; GCN-NEXT: s_or_b64 s[2:3], s[6:7], s[0:1]
; GCN-NEXT: s_mov_b32 s0, 0
; GCN-NEXT: s_mov_b32 s1, s4
; GCN-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_shl_i65_33:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: s_lshl_b32 s5, s2, 1
; GFX10-NEXT: s_lshr_b64 s[2:3], s[0:1], 31
; GFX10-NEXT: s_lshl_b32 s1, s0, 1
; GFX10-NEXT: s_or_b64 s[2:3], s[4:5], s[2:3]
; GFX10-NEXT: s_mov_b32 s0, 0
; GFX10-NEXT: ; return to shader part epilog
%result = shl i65 %value, 33
ret i65 %result
}

; FIXME: Argument lowering asserts
; define <2 x i65> @v_shl_v2i65(<2 x i65> %value, <2 x i65> %amount) {
; %result = shl <2 x i65> %value, %amount
; ret <2 x i65> %result
; }

; define amdgpu_ps <2 x i65> @s_shl_v2i65(<2 x i65> inreg %value, <2 x i65> inreg %amount) {
; %result = shl <2 x i65> %value, %amount
; ret <2 x i65> %result
; }