AMDGPU/GlobalISel: Avoid illegal vector exts for add/sub/mul
When expanding scalar packed operations, we should not introduce the illegal vector extensions (e.g., G_ANYEXT to <2 x s32>) that LegalizerHelper's widenScalar path creates. We're not in a legalizer context here, and there is no RegBankSelect apply step or legalize worklist to clean them up.
parent ed72bcae34
commit 627bb31a28
@@ -2096,20 +2096,24 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
     MachineIRBuilder B(MI);
     ApplyRegBankMapping ApplySALU(*this, MRI, &AMDGPU::SGPRRegBank);
     GISelObserverWrapper Observer(&ApplySALU);
-    LegalizerHelper Helper(*MF, Observer, B);
 
     if (DstTy.isVector()) {
-      // FIXME: Multi-step legalization is awkward here. We're relying on the
-      // fact that widenScalar leaves the instruction in place in this case, and
-      // we have to do it in this order.
-      if (Helper.widenScalar(MI, 0, LLT::vector(2, 32)) !=
-          LegalizerHelper::Legalized)
-        llvm_unreachable("widen scalar should have succeeded");
-
+      B.setChangeObserver(Observer);
 
-      if (Helper.fewerElementsVector(MI, 0, S32) != LegalizerHelper::Legalized)
-        llvm_unreachable("fewerElementsVector should have succeeded");
+      Register WideSrc0Lo, WideSrc0Hi;
+      Register WideSrc1Lo, WideSrc1Hi;
+
+      std::tie(WideSrc0Lo, WideSrc0Hi)
+        = unpackV2S16ToS32(B, MI.getOperand(1).getReg(), AMDGPU::G_ANYEXT);
+      std::tie(WideSrc1Lo, WideSrc1Hi)
+        = unpackV2S16ToS32(B, MI.getOperand(2).getReg(), AMDGPU::G_ANYEXT);
+      auto Lo = B.buildInstr(MI.getOpcode(), {S32}, {WideSrc0Lo, WideSrc1Lo});
+      auto Hi = B.buildInstr(MI.getOpcode(), {S32}, {WideSrc0Hi, WideSrc1Hi});
+      B.buildBuildVectorTrunc(DstReg, {Lo.getReg(0), Hi.getReg(0)});
+      MI.eraseFromParent();
     } else {
+      LegalizerHelper Helper(*MF, Observer, B);
+
       if (Helper.widenScalar(MI, 0, S32) != LegalizerHelper::Legalized)
         llvm_unreachable("widen scalar should have succeeded");
     }

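The unpackV2S16ToS32 helper used in the new vector path is what keeps everything in legal scalar types: each <2 x s16> operand becomes two s32 values, the operation runs once per half, and G_BUILD_VECTOR_TRUNC repacks the result. As a rough illustration of what such a helper emits for G_ANYEXT, here is a sketch inferred from the G_BITCAST/G_LSHR pattern in the updated regbankselect test at the end of this page; it is illustrative only, not the in-tree implementation:

// Illustrative sketch only, not the in-tree unpackV2S16ToS32. For an
// any-extending unpack the high bits of each widened half are don't-care,
// so the low half is just the packed value bitcast to s32, and the high
// half is a 16-bit logical shift right of that bitcast.
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include <utility>

using namespace llvm;

static std::pair<Register, Register>
sketchUnpackV2S16ToS32(MachineIRBuilder &B, Register Src) {
  const LLT S32 = LLT::scalar(32);
  auto Cast = B.buildBitcast(S32, Src);        // <2 x s16> -> s32, low lane in low bits
  auto ShiftAmt = B.buildConstant(S32, 16);
  auto Hi = B.buildLShr(S32, Cast, ShiftAmt);  // high lane shifted into the low bits
  return {Cast.getReg(0), Hi.getReg(0)};
}

The in-tree helper additionally takes the extend opcode as a parameter (G_ANYEXT at the call sites above), presumably so the same routine can produce sign- or zero-extending unpacks where the high bits matter.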
@@ -0,0 +1,374 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=fiji < %s | FileCheck -check-prefix=GFX8 %s

define <2 x i16> @v_add_v2i16(<2 x i16> %a, <2 x i16> %b) {
; GFX9-LABEL: v_add_v2i16:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_pk_add_u16 v0, v0, v1
; GFX9-NEXT:    s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_add_v2i16:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT:    v_add_u16_e32 v2, v0, v1
; GFX8-NEXT:    v_add_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT:    v_or_b32_e32 v0, v2, v0
; GFX8-NEXT:    s_setpc_b64 s[30:31]
  %add = add <2 x i16> %a, %b
  ret <2 x i16> %add
}

define <2 x i16> @v_add_v2i16_fneg_lhs(<2 x half> %a, <2 x i16> %b) {
; GFX9-LABEL: v_add_v2i16_fneg_lhs:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_pk_add_u16 v0, v0, v1 neg_lo:[1,0] neg_hi:[1,0]
; GFX9-NEXT:    s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_add_v2i16_fneg_lhs:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT:    v_xor_b32_e32 v0, 0x80008000, v0
; GFX8-NEXT:    v_add_u16_e32 v2, v0, v1
; GFX8-NEXT:    v_add_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT:    v_or_b32_e32 v0, v2, v0
; GFX8-NEXT:    s_setpc_b64 s[30:31]
  %neg.a = fneg <2 x half> %a
  %cast.neg.a = bitcast <2 x half> %neg.a to <2 x i16>
  %add = add <2 x i16> %cast.neg.a, %b
  ret <2 x i16> %add
}

define <2 x i16> @v_add_v2i16_fneg_rhs(<2 x i16> %a, <2 x half> %b) {
; GFX9-LABEL: v_add_v2i16_fneg_rhs:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_pk_add_u16 v0, v0, v1 neg_lo:[0,1] neg_hi:[0,1]
; GFX9-NEXT:    s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_add_v2i16_fneg_rhs:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT:    v_xor_b32_e32 v1, 0x80008000, v1
; GFX8-NEXT:    v_add_u16_e32 v2, v0, v1
; GFX8-NEXT:    v_add_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT:    v_or_b32_e32 v0, v2, v0
; GFX8-NEXT:    s_setpc_b64 s[30:31]
  %neg.b = fneg <2 x half> %b
  %cast.neg.b = bitcast <2 x half> %neg.b to <2 x i16>
  %add = add <2 x i16> %a, %cast.neg.b
  ret <2 x i16> %add
}

define <2 x i16> @v_add_v2i16_fneg_lhs_fneg_rhs(<2 x half> %a, <2 x half> %b) {
; GFX9-LABEL: v_add_v2i16_fneg_lhs_fneg_rhs:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_pk_add_u16 v0, v0, v1 neg_lo:[1,1] neg_hi:[1,1]
; GFX9-NEXT:    s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_add_v2i16_fneg_lhs_fneg_rhs:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT:    s_mov_b32 s4, 0x80008000
; GFX8-NEXT:    v_xor_b32_e32 v0, s4, v0
; GFX8-NEXT:    v_xor_b32_e32 v1, s4, v1
; GFX8-NEXT:    v_add_u16_e32 v2, v0, v1
; GFX8-NEXT:    v_add_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT:    v_or_b32_e32 v0, v2, v0
; GFX8-NEXT:    s_setpc_b64 s[30:31]
  %neg.a = fneg <2 x half> %a
  %neg.b = fneg <2 x half> %b
  %cast.neg.a = bitcast <2 x half> %neg.a to <2 x i16>
  %cast.neg.b = bitcast <2 x half> %neg.b to <2 x i16>
  %add = add <2 x i16> %cast.neg.a, %cast.neg.b
  ret <2 x i16> %add
}

define <2 x i16> @v_add_v2i16_neg_inline_imm_splat(<2 x i16> %a) {
; GFX9-LABEL: v_add_v2i16_neg_inline_imm_splat:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    s_mov_b32 s4, 0xffffffc0
; GFX9-NEXT:    s_pack_ll_b32_b16 s4, s4, s4
; GFX9-NEXT:    v_pk_add_u16 v0, v0, s4
; GFX9-NEXT:    s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_add_v2i16_neg_inline_imm_splat:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT:    s_mov_b32 s4, 0xffc0
; GFX8-NEXT:    v_mov_b32_e32 v2, s4
; GFX8-NEXT:    v_add_u16_e32 v1, s4, v0
; GFX8-NEXT:    v_add_u16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
; GFX8-NEXT:    s_setpc_b64 s[30:31]
  %add = add <2 x i16> %a, <i16 -64, i16 -64>
  ret <2 x i16> %add
}

define <2 x i16> @v_add_v2i16_neg_inline_imm_lo(<2 x i16> %a) {
; GFX9-LABEL: v_add_v2i16_neg_inline_imm_lo:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    s_pack_ll_b32_b16 s4, 0xffffffc0, 4
; GFX9-NEXT:    v_pk_add_u16 v0, v0, s4
; GFX9-NEXT:    s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_add_v2i16_neg_inline_imm_lo:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT:    v_mov_b32_e32 v2, 4
; GFX8-NEXT:    v_add_u16_e32 v1, 0xffc0, v0
; GFX8-NEXT:    v_add_u16_sdwa v0, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
; GFX8-NEXT:    s_setpc_b64 s[30:31]
  %add = add <2 x i16> %a, <i16 -64, i16 4>
  ret <2 x i16> %add
}

define <2 x i16> @v_add_v2i16_neg_inline_imm_hi(<2 x i16> %a) {
; GFX9-LABEL: v_add_v2i16_neg_inline_imm_hi:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    s_pack_ll_b32_b16 s4, 4, 0xffffffc0
; GFX9-NEXT:    v_pk_add_u16 v0, v0, s4
; GFX9-NEXT:    s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_add_v2i16_neg_inline_imm_hi:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT:    v_mov_b32_e32 v1, 0xffc0
; GFX8-NEXT:    v_add_u16_e32 v2, 4, v0
; GFX8-NEXT:    v_add_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX8-NEXT:    v_or_b32_e32 v0, v2, v0
; GFX8-NEXT:    s_setpc_b64 s[30:31]
  %add = add <2 x i16> %a, <i16 4, i16 -64>
  ret <2 x i16> %add
}

define amdgpu_ps i32 @s_add_v2i16_neg_inline_imm_splat(<2 x i16> inreg %a) {
; GFX9-LABEL: s_add_v2i16_neg_inline_imm_splat:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_mov_b32 s1, 0xffffffc0
; GFX9-NEXT:    s_pack_ll_b32_b16 s1, s1, s1
; GFX9-NEXT:    s_lshr_b32 s2, s0, 16
; GFX9-NEXT:    s_lshr_b32 s3, s1, 16
; GFX9-NEXT:    s_add_i32 s0, s0, s1
; GFX9-NEXT:    s_add_i32 s2, s2, s3
; GFX9-NEXT:    s_pack_ll_b32_b16 s0, s0, s2
; GFX9-NEXT:    ; return to shader part epilog
;
; GFX8-LABEL: s_add_v2i16_neg_inline_imm_splat:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_lshr_b32 s2, s0, 16
; GFX8-NEXT:    s_mov_b32 s3, 0xffff
; GFX8-NEXT:    s_mov_b32 s1, 0xffc0
; GFX8-NEXT:    s_and_b32 s0, s0, s3
; GFX8-NEXT:    s_and_b32 s2, s2, s3
; GFX8-NEXT:    s_add_i32 s0, s0, s1
; GFX8-NEXT:    s_add_i32 s2, s2, s1
; GFX8-NEXT:    s_lshl_b32 s1, s2, 16
; GFX8-NEXT:    s_and_b32 s0, s0, s3
; GFX8-NEXT:    s_or_b32 s0, s1, s0
; GFX8-NEXT:    ; return to shader part epilog
  %add = add <2 x i16> %a, <i16 -64, i16 -64>
  %cast = bitcast <2 x i16> %add to i32
  ret i32 %cast
}

define amdgpu_ps i32 @s_add_v2i16_neg_inline_imm_lo(<2 x i16> inreg %a) {
; GFX9-LABEL: s_add_v2i16_neg_inline_imm_lo:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_pack_ll_b32_b16 s1, 0xffffffc0, 4
; GFX9-NEXT:    s_lshr_b32 s2, s0, 16
; GFX9-NEXT:    s_lshr_b32 s3, s1, 16
; GFX9-NEXT:    s_add_i32 s0, s0, s1
; GFX9-NEXT:    s_add_i32 s2, s2, s3
; GFX9-NEXT:    s_pack_ll_b32_b16 s0, s0, s2
; GFX9-NEXT:    ; return to shader part epilog
;
; GFX8-LABEL: s_add_v2i16_neg_inline_imm_lo:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_lshr_b32 s1, s0, 16
; GFX8-NEXT:    s_mov_b32 s2, 0xffff
; GFX8-NEXT:    s_and_b32 s0, s0, s2
; GFX8-NEXT:    s_and_b32 s1, s1, s2
; GFX8-NEXT:    s_add_i32 s0, s0, 0xffc0
; GFX8-NEXT:    s_add_i32 s1, s1, 4
; GFX8-NEXT:    s_lshl_b32 s1, s1, 16
; GFX8-NEXT:    s_and_b32 s0, s0, s2
; GFX8-NEXT:    s_or_b32 s0, s1, s0
; GFX8-NEXT:    ; return to shader part epilog
  %add = add <2 x i16> %a, <i16 -64, i16 4>
  %cast = bitcast <2 x i16> %add to i32
  ret i32 %cast
}

define amdgpu_ps i32 @s_add_v2i16_neg_inline_imm_hi(<2 x i16> inreg %a) {
; GFX9-LABEL: s_add_v2i16_neg_inline_imm_hi:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_pack_ll_b32_b16 s1, 4, 0xffffffc0
; GFX9-NEXT:    s_lshr_b32 s2, s0, 16
; GFX9-NEXT:    s_lshr_b32 s3, s1, 16
; GFX9-NEXT:    s_add_i32 s0, s0, s1
; GFX9-NEXT:    s_add_i32 s2, s2, s3
; GFX9-NEXT:    s_pack_ll_b32_b16 s0, s0, s2
; GFX9-NEXT:    ; return to shader part epilog
;
; GFX8-LABEL: s_add_v2i16_neg_inline_imm_hi:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_lshr_b32 s1, s0, 16
; GFX8-NEXT:    s_mov_b32 s2, 0xffff
; GFX8-NEXT:    s_and_b32 s0, s0, s2
; GFX8-NEXT:    s_and_b32 s1, s1, s2
; GFX8-NEXT:    s_add_i32 s0, s0, 4
; GFX8-NEXT:    s_add_i32 s1, s1, 0xffc0
; GFX8-NEXT:    s_lshl_b32 s1, s1, 16
; GFX8-NEXT:    s_and_b32 s0, s0, s2
; GFX8-NEXT:    s_or_b32 s0, s1, s0
; GFX8-NEXT:    ; return to shader part epilog
  %add = add <2 x i16> %a, <i16 4, i16 -64>
  %cast = bitcast <2 x i16> %add to i32
  ret i32 %cast
}

define amdgpu_ps i32 @s_add_v2i16(<2 x i16> inreg %a, <2 x i16> inreg %b) {
; GFX9-LABEL: s_add_v2i16:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_lshr_b32 s2, s0, 16
; GFX9-NEXT:    s_lshr_b32 s3, s1, 16
; GFX9-NEXT:    s_add_i32 s0, s0, s1
; GFX9-NEXT:    s_add_i32 s2, s2, s3
; GFX9-NEXT:    s_pack_ll_b32_b16 s0, s0, s2
; GFX9-NEXT:    ; return to shader part epilog
;
; GFX8-LABEL: s_add_v2i16:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_lshr_b32 s2, s0, 16
; GFX8-NEXT:    s_mov_b32 s3, 0xffff
; GFX8-NEXT:    s_lshr_b32 s4, s1, 16
; GFX8-NEXT:    s_and_b32 s0, s0, s3
; GFX8-NEXT:    s_and_b32 s1, s1, s3
; GFX8-NEXT:    s_and_b32 s2, s2, s3
; GFX8-NEXT:    s_and_b32 s4, s4, s3
; GFX8-NEXT:    s_add_i32 s0, s0, s1
; GFX8-NEXT:    s_add_i32 s2, s2, s4
; GFX8-NEXT:    s_lshl_b32 s1, s2, 16
; GFX8-NEXT:    s_and_b32 s0, s0, s3
; GFX8-NEXT:    s_or_b32 s0, s1, s0
; GFX8-NEXT:    ; return to shader part epilog
  %add = add <2 x i16> %a, %b
  %cast = bitcast <2 x i16> %add to i32
  ret i32 %cast
}

define amdgpu_ps i32 @s_add_v2i16_fneg_lhs(<2 x half> inreg %a, <2 x i16> inreg %b) {
; GFX9-LABEL: s_add_v2i16_fneg_lhs:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_xor_b32 s0, s0, 0x80008000
; GFX9-NEXT:    s_lshr_b32 s2, s0, 16
; GFX9-NEXT:    s_lshr_b32 s3, s1, 16
; GFX9-NEXT:    s_add_i32 s0, s0, s1
; GFX9-NEXT:    s_add_i32 s2, s2, s3
; GFX9-NEXT:    s_pack_ll_b32_b16 s0, s0, s2
; GFX9-NEXT:    ; return to shader part epilog
;
; GFX8-LABEL: s_add_v2i16_fneg_lhs:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_xor_b32 s0, s0, 0x80008000
; GFX8-NEXT:    s_lshr_b32 s2, s0, 16
; GFX8-NEXT:    s_mov_b32 s3, 0xffff
; GFX8-NEXT:    s_lshr_b32 s4, s1, 16
; GFX8-NEXT:    s_and_b32 s0, s0, s3
; GFX8-NEXT:    s_and_b32 s1, s1, s3
; GFX8-NEXT:    s_and_b32 s2, s2, s3
; GFX8-NEXT:    s_and_b32 s4, s4, s3
; GFX8-NEXT:    s_add_i32 s0, s0, s1
; GFX8-NEXT:    s_add_i32 s2, s2, s4
; GFX8-NEXT:    s_lshl_b32 s1, s2, 16
; GFX8-NEXT:    s_and_b32 s0, s0, s3
; GFX8-NEXT:    s_or_b32 s0, s1, s0
; GFX8-NEXT:    ; return to shader part epilog
  %neg.a = fneg <2 x half> %a
  %cast.neg.a = bitcast <2 x half> %neg.a to <2 x i16>
  %add = add <2 x i16> %cast.neg.a, %b
  %cast = bitcast <2 x i16> %add to i32
  ret i32 %cast
}

define amdgpu_ps i32 @s_add_v2i16_fneg_rhs(<2 x i16> inreg %a, <2 x half> inreg %b) {
; GFX9-LABEL: s_add_v2i16_fneg_rhs:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_xor_b32 s1, s1, 0x80008000
; GFX9-NEXT:    s_lshr_b32 s2, s0, 16
; GFX9-NEXT:    s_lshr_b32 s3, s1, 16
; GFX9-NEXT:    s_add_i32 s0, s0, s1
; GFX9-NEXT:    s_add_i32 s2, s2, s3
; GFX9-NEXT:    s_pack_ll_b32_b16 s0, s0, s2
; GFX9-NEXT:    ; return to shader part epilog
;
; GFX8-LABEL: s_add_v2i16_fneg_rhs:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_xor_b32 s1, s1, 0x80008000
; GFX8-NEXT:    s_lshr_b32 s2, s0, 16
; GFX8-NEXT:    s_mov_b32 s3, 0xffff
; GFX8-NEXT:    s_lshr_b32 s4, s1, 16
; GFX8-NEXT:    s_and_b32 s0, s0, s3
; GFX8-NEXT:    s_and_b32 s1, s1, s3
; GFX8-NEXT:    s_and_b32 s2, s2, s3
; GFX8-NEXT:    s_and_b32 s4, s4, s3
; GFX8-NEXT:    s_add_i32 s0, s0, s1
; GFX8-NEXT:    s_add_i32 s2, s2, s4
; GFX8-NEXT:    s_lshl_b32 s1, s2, 16
; GFX8-NEXT:    s_and_b32 s0, s0, s3
; GFX8-NEXT:    s_or_b32 s0, s1, s0
; GFX8-NEXT:    ; return to shader part epilog
  %neg.b = fneg <2 x half> %b
  %cast.neg.b = bitcast <2 x half> %neg.b to <2 x i16>
  %add = add <2 x i16> %a, %cast.neg.b
  %cast = bitcast <2 x i16> %add to i32
  ret i32 %cast
}

define amdgpu_ps i32 @s_add_v2i16_fneg_lhs_fneg_rhs(<2 x half> inreg %a, <2 x half> inreg %b) {
; GFX9-LABEL: s_add_v2i16_fneg_lhs_fneg_rhs:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_mov_b32 s2, 0x80008000
; GFX9-NEXT:    s_xor_b32 s1, s1, s2
; GFX9-NEXT:    s_xor_b32 s0, s0, s2
; GFX9-NEXT:    s_lshr_b32 s2, s0, 16
; GFX9-NEXT:    s_lshr_b32 s3, s1, 16
; GFX9-NEXT:    s_add_i32 s0, s0, s1
; GFX9-NEXT:    s_add_i32 s2, s2, s3
; GFX9-NEXT:    s_pack_ll_b32_b16 s0, s0, s2
; GFX9-NEXT:    ; return to shader part epilog
;
; GFX8-LABEL: s_add_v2i16_fneg_lhs_fneg_rhs:
; GFX8:       ; %bb.0:
; GFX8-NEXT:    s_mov_b32 s2, 0x80008000
; GFX8-NEXT:    s_xor_b32 s1, s1, s2
; GFX8-NEXT:    s_xor_b32 s0, s0, s2
; GFX8-NEXT:    s_lshr_b32 s2, s0, 16
; GFX8-NEXT:    s_mov_b32 s3, 0xffff
; GFX8-NEXT:    s_lshr_b32 s4, s1, 16
; GFX8-NEXT:    s_and_b32 s0, s0, s3
; GFX8-NEXT:    s_and_b32 s1, s1, s3
; GFX8-NEXT:    s_and_b32 s2, s2, s3
; GFX8-NEXT:    s_and_b32 s4, s4, s3
; GFX8-NEXT:    s_add_i32 s0, s0, s1
; GFX8-NEXT:    s_add_i32 s2, s2, s4
; GFX8-NEXT:    s_lshl_b32 s1, s2, 16
; GFX8-NEXT:    s_and_b32 s0, s0, s3
; GFX8-NEXT:    s_or_b32 s0, s1, s0
; GFX8-NEXT:    ; return to shader part epilog
  %neg.a = fneg <2 x half> %a
  %neg.b = fneg <2 x half> %b
  %cast.neg.a = bitcast <2 x half> %neg.a to <2 x i16>
  %cast.neg.b = bitcast <2 x half> %neg.b to <2 x i16>
  %add = add <2 x i16> %cast.neg.a, %cast.neg.b
  %cast = bitcast <2 x i16> %add to i32
  ret i32 %cast
}

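As a sanity check on the SALU output in the s_add_* tests above, the per-lane arithmetic the expansion performs on one packed 32-bit word can be written as ordinary integer code. The following is an illustration only (hypothetical helper name, not part of the patch), mirroring the GFX8 sequence of s_lshr/s_and to unpack the lanes, s_add_i32 per lane, and s_lshl/s_or to repack:

#include <cassert>
#include <cstdint>

// Packed 2 x i16 addition on one 32-bit word, as in the GFX8 SALU expansion.
uint32_t addPackedV2I16(uint32_t A, uint32_t B) {
  uint32_t LoA = A & 0xffffu, HiA = A >> 16;  // unpack lanes (s_and / s_lshr)
  uint32_t LoB = B & 0xffffu, HiB = B >> 16;
  uint32_t Lo = (LoA + LoB) & 0xffffu;        // each lane wraps at 16 bits
  uint32_t Hi = (HiA + HiB) & 0xffffu;
  return (Hi << 16) | Lo;                     // repack (s_lshl + s_or)
}

int main() {
  // 0x0001fffc is <i16 -4, i16 1>; adding <i16 4, i16 4> wraps the low lane.
  assert(addPackedV2I16(0x0001fffcu, 0x00040004u) == 0x00050000u);
  return 0;
}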
@@ -12,15 +12,16 @@ body: |
     ; CHECK-LABEL: name: add_v2s16_ss
     ; CHECK: [[COPY:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr1
-    ; CHECK: [[ANYEXT:%[0-9]+]]:sgpr(<2 x s32>) = G_ANYEXT [[COPY]](<2 x s16>)
-    ; CHECK: [[ANYEXT1:%[0-9]+]]:sgpr(<2 x s32>) = G_ANYEXT [[COPY1]](<2 x s16>)
-    ; CHECK: [[UV:%[0-9]+]]:sgpr(s32), [[UV1:%[0-9]+]]:sgpr(s32) = G_UNMERGE_VALUES [[ANYEXT]](<2 x s32>)
-    ; CHECK: [[UV2:%[0-9]+]]:sgpr(s32), [[UV3:%[0-9]+]]:sgpr(s32) = G_UNMERGE_VALUES [[ANYEXT1]](<2 x s32>)
-    ; CHECK: [[ADD:%[0-9]+]]:sgpr(s32) = G_ADD [[UV]], [[UV2]]
-    ; CHECK: [[ADD1:%[0-9]+]]:sgpr(s32) = G_ADD [[UV1]], [[UV3]]
-    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:sgpr(<2 x s32>) = G_BUILD_VECTOR [[ADD]](s32), [[ADD1]](s32)
-    ; CHECK: [[TRUNC:%[0-9]+]]:sgpr(<2 x s16>) = G_TRUNC [[BUILD_VECTOR]](<2 x s32>)
-    ; CHECK: S_ENDPGM 0, implicit [[TRUNC]](<2 x s16>)
+    ; CHECK: [[BITCAST:%[0-9]+]]:sgpr(s32) = G_BITCAST [[COPY]](<2 x s16>)
+    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16
+    ; CHECK: [[LSHR:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+    ; CHECK: [[BITCAST1:%[0-9]+]]:sgpr(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+    ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16
+    ; CHECK: [[LSHR1:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST1]], [[C1]](s32)
+    ; CHECK: [[ADD:%[0-9]+]]:sgpr(s32) = G_ADD [[BITCAST]], [[BITCAST1]]
+    ; CHECK: [[ADD1:%[0-9]+]]:sgpr(s32) = G_ADD [[LSHR]], [[LSHR1]]
+    ; CHECK: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:sgpr(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ADD]](s32), [[ADD1]](s32)
+    ; CHECK: S_ENDPGM 0, implicit [[BUILD_VECTOR_TRUNC]](<2 x s16>)
     %0:_(<2 x s16>) = COPY $sgpr0
     %1:_(<2 x s16>) = COPY $sgpr1
     %2:_(<2 x s16>) = G_ADD %0, %1