forked from OSchip/llvm-project
[AMDGPU] Check for unneeded shift mask in shift PatFrags.
The existing constrained shift PatFrags only dealt with the masked shifts produced by OpenCL front-ends. This change copies the X86DAGToDAGISel::isUnneededShiftMask() function to AMDGPU and uses it in the shift PatFrag predicates.
Differential Revision: https://reviews.llvm.org/D113448
This commit is contained in:
parent 8cdf1c1edb
commit 078da26b1c
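For context (not part of the commit): the predicate removes an AND that front-ends place on a shift amount whenever the mask cannot change the bits the hardware shift actually reads. AMDGPU's 32-bit shifts use only the low 5 bits of the shift amount, so a mask whose trailing ones cover those bits is a no-op. A minimal standalone C++ sketch of the first check below (hypothetical names, plain integers in place of APInt, and without the known-bits fallback the real function adds):

#include <bit>      // std::countr_one (C++20)
#include <cstdint>

// Sketch of the constant-mask case of isUnneededShiftMask(): selecting
// `x shift (amt & Mask)` as a plain shift is safe when Mask's trailing
// ones cover every shift-amount bit the hardware reads (ShAmtBits).
bool maskIsUnneeded(uint32_t Mask, unsigned ShAmtBits) {
  return static_cast<unsigned>(std::countr_one(Mask)) >= ShAmtBits;
}

int main() {
  // 0x1f has five trailing ones and a 32-bit shift reads 5 amount bits,
  // so `amt & 0x1f` is redundant and the AND can be folded away.
  return maskIsUnneeded(0x1f, 5) ? 0 : 1;
}

The real predicate additionally ORs in the known-zero bits of the shift amount, so a mask can also be dropped when known bits prove the masked-off bits were already zero.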
@@ -719,6 +719,18 @@ bool AMDGPUDAGToDAGISel::isUniformBr(const SDNode *N) const {
         Term->getMetadata("structurizecfg.uniform");
}

bool AMDGPUDAGToDAGISel::isUnneededShiftMask(const SDNode *N,
                                             unsigned ShAmtBits) const {
  assert(N->getOpcode() == ISD::AND);

  const APInt &RHS = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();
  if (RHS.countTrailingOnes() >= ShAmtBits)
    return true;

  const APInt &LHSKnownZeros = CurDAG->computeKnownBits(N->getOperand(0)).Zero;
  return (LHSKnownZeros | RHS).countTrailingOnes() >= ShAmtBits;
}

static bool getBaseWithOffsetUsingSplitOR(SelectionDAG &DAG, SDValue Addr,
                                          SDValue &N0, SDValue &N1) {
  if (Addr.getValueType() == MVT::i64 && Addr.getOpcode() == ISD::BITCAST &&
@@ -136,6 +136,10 @@ private:
  bool isUniformLoad(const SDNode *N) const;
  bool isUniformBr(const SDNode *N) const;

  // Returns true if ISD::AND SDNode `N`'s masking of the shift amount
  // operand's `ShAmtBits` bits is unneeded.
  bool isUnneededShiftMask(const SDNode *N, unsigned ShAmtBits) const;

  bool isBaseWithConstantOffset64(SDValue Addr, SDValue &LHS,
                                  SDValue &RHS) const;
@@ -3880,6 +3880,22 @@ bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
  return KnownBits->signBitIsZero(Base);
}

bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
                                                    unsigned ShAmtBits) const {
  assert(MI.getOpcode() == TargetOpcode::G_AND);

  Optional<APInt> RHS = getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI);
  if (!RHS)
    return false;

  if (RHS->countTrailingOnes() >= ShAmtBits)
    return true;

  const APInt &LHSKnownZeros =
      KnownBits->getKnownZeroes(MI.getOperand(1).getReg());
  return (LHSKnownZeros | *RHS).countTrailingOnes() >= ShAmtBits;
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFScratchOffset(
    MachineOperand &Root) const {
@@ -300,6 +300,10 @@ private:
  bool isInlineImmediate64(int64_t Imm) const;
  bool isInlineImmediate(const APFloat &Imm) const;

  // Returns true if TargetOpcode::G_AND MachineInstr `MI`'s masking of the
  // shift amount operand's `ShAmtBits` bits is unneeded.
  bool isUnneededShiftMask(const MachineInstr &MI, unsigned ShAmtBits) const;

  const SIInstrInfo &TII;
  const SIRegisterInfo &TRI;
  const AMDGPURegisterBankInfo &RBI;
@@ -242,25 +242,42 @@ def AMDGPUmul_i24_oneuse : HasOneUseBinOp<AMDGPUmul_i24>;
//===----------------------------------------------------------------------===//

// Constrained shift PatFrags.

def csh_mask_16 : PatFrag<(ops node:$src0), (and node:$src0, imm),
    [{ return isUnneededShiftMask(N, 4); }]> {
  let GISelPredicateCode = [{ return isUnneededShiftMask(MI, 4); }];
}

def csh_mask_32 : PatFrag<(ops node:$src0), (and node:$src0, imm),
    [{ return isUnneededShiftMask(N, 5); }]> {
  let GISelPredicateCode = [{ return isUnneededShiftMask(MI, 5); }];
}

def csh_mask_64 : PatFrag<(ops node:$src0), (and node:$src0, imm),
    [{ return isUnneededShiftMask(N, 6); }]> {
  let GISelPredicateCode = [{ return isUnneededShiftMask(MI, 6); }];
}

foreach width = [16, 32, 64] in {
defvar mask = !sub(width, 1);
defvar csh_mask = !cast<SDPatternOperator>("csh_mask_"#width);

def cshl_#width : PatFrags<(ops node:$src0, node:$src1),
  [(shl node:$src0, node:$src1), (shl node:$src0, (and node:$src1, mask))]>;
  [(shl node:$src0, node:$src1), (shl node:$src0, (csh_mask node:$src1))]>;
defvar cshl = !cast<SDPatternOperator>("cshl_"#width);
def cshl_#width#_oneuse : HasOneUseBinOp<cshl>;
def clshl_rev_#width : PatFrag <(ops node:$src0, node:$src1),
  (cshl $src1, $src0)>;

def csrl_#width : PatFrags<(ops node:$src0, node:$src1),
  [(srl node:$src0, node:$src1), (srl node:$src0, (and node:$src1, mask))]>;
  [(srl node:$src0, node:$src1), (srl node:$src0, (csh_mask node:$src1))]>;
defvar csrl = !cast<SDPatternOperator>("csrl_"#width);
def csrl_#width#_oneuse : HasOneUseBinOp<csrl>;
def clshr_rev_#width : PatFrag <(ops node:$src0, node:$src1),
  (csrl $src1, $src0)>;

def csra_#width : PatFrags<(ops node:$src0, node:$src1),
  [(sra node:$src0, node:$src1), (sra node:$src0, (and node:$src1, mask))]>;
  [(sra node:$src0, node:$src1), (sra node:$src0, (csh_mask node:$src1))]>;
defvar csra = !cast<SDPatternOperator>("csra_"#width);
def csra_#width#_oneuse : HasOneUseBinOp<csra>;
def cashr_rev_#width : PatFrag <(ops node:$src0, node:$src1),
  (csra $src1, $src0)>;
@@ -74,7 +74,6 @@ define i8 @v_ashr_i8_7(i8 %value) {
define amdgpu_ps i8 @s_ashr_i8(i8 inreg %value, i8 inreg %amount) {
; GFX6-LABEL: s_ashr_i8:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_and_b32 s1, s1, 0xff
; GFX6-NEXT: s_sext_i32_i8 s0, s0
; GFX6-NEXT: s_ashr_i32 s0, s0, s1
; GFX6-NEXT: ; return to shader part epilog
@@ -163,14 +162,12 @@ define i24 @v_ashr_i24_7(i24 %value) {
define amdgpu_ps i24 @s_ashr_i24(i24 inreg %value, i24 inreg %amount) {
; GCN-LABEL: s_ashr_i24:
; GCN: ; %bb.0:
; GCN-NEXT: s_and_b32 s1, s1, 0xffffff
; GCN-NEXT: s_bfe_i32 s0, s0, 0x180000
; GCN-NEXT: s_ashr_i32 s0, s0, s1
; GCN-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_ashr_i24:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_and_b32 s1, s1, 0xffffff
; GFX10-NEXT: s_bfe_i32 s0, s0, 0x180000
; GFX10-NEXT: s_ashr_i32 s0, s0, s1
; GFX10-NEXT: ; return to shader part epilog
@@ -619,7 +616,6 @@ define i16 @v_ashr_i16_31(i16 %value) {
define amdgpu_ps i16 @s_ashr_i16(i16 inreg %value, i16 inreg %amount) {
; GFX6-LABEL: s_ashr_i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_and_b32 s1, s1, 0xffff
; GFX6-NEXT: s_sext_i32_i16 s0, s0
; GFX6-NEXT: s_ashr_i32 s0, s0, s1
; GFX6-NEXT: ; return to shader part epilog
@@ -793,13 +789,11 @@ define <2 x i16> @v_ashr_v2i16_15(<2 x i16> %value) {
define amdgpu_ps i32 @s_ashr_v2i16(<2 x i16> inreg %value, <2 x i16> inreg %amount) {
; GFX6-LABEL: s_ashr_v2i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_mov_b32 s4, 0xffff
; GFX6-NEXT: s_and_b32 s2, s2, s4
; GFX6-NEXT: s_sext_i32_i16 s0, s0
; GFX6-NEXT: s_ashr_i32 s0, s0, s2
; GFX6-NEXT: s_and_b32 s2, s3, s4
; GFX6-NEXT: s_sext_i32_i16 s1, s1
; GFX6-NEXT: s_ashr_i32 s1, s1, s2
; GFX6-NEXT: s_mov_b32 s4, 0xffff
; GFX6-NEXT: s_sext_i32_i16 s0, s0
; GFX6-NEXT: s_ashr_i32 s1, s1, s3
; GFX6-NEXT: s_ashr_i32 s0, s0, s2
; GFX6-NEXT: s_and_b32 s1, s1, s4
; GFX6-NEXT: s_and_b32 s0, s0, s4
; GFX6-NEXT: s_lshl_b32 s1, s1, 16
@@ -995,20 +989,16 @@ define <2 x float> @v_ashr_v4i16(<4 x i16> %value, <4 x i16> %amount) {
define amdgpu_ps <2 x i32> @s_ashr_v4i16(<4 x i16> inreg %value, <4 x i16> inreg %amount) {
; GFX6-LABEL: s_ashr_v4i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_mov_b32 s8, 0xffff
; GFX6-NEXT: s_and_b32 s4, s4, s8
; GFX6-NEXT: s_sext_i32_i16 s0, s0
; GFX6-NEXT: s_ashr_i32 s0, s0, s4
; GFX6-NEXT: s_and_b32 s4, s5, s8
; GFX6-NEXT: s_sext_i32_i16 s1, s1
; GFX6-NEXT: s_ashr_i32 s1, s1, s4
; GFX6-NEXT: s_and_b32 s4, s6, s8
; GFX6-NEXT: s_mov_b32 s8, 0xffff
; GFX6-NEXT: s_sext_i32_i16 s0, s0
; GFX6-NEXT: s_ashr_i32 s1, s1, s5
; GFX6-NEXT: s_ashr_i32 s0, s0, s4
; GFX6-NEXT: s_sext_i32_i16 s2, s2
; GFX6-NEXT: s_ashr_i32 s2, s2, s4
; GFX6-NEXT: s_and_b32 s4, s7, s8
; GFX6-NEXT: s_sext_i32_i16 s3, s3
; GFX6-NEXT: s_and_b32 s1, s1, s8
; GFX6-NEXT: s_ashr_i32 s3, s3, s4
; GFX6-NEXT: s_ashr_i32 s2, s2, s6
; GFX6-NEXT: s_ashr_i32 s3, s3, s7
; GFX6-NEXT: s_and_b32 s0, s0, s8
; GFX6-NEXT: s_lshl_b32 s1, s1, 16
; GFX6-NEXT: s_or_b32 s0, s0, s1
@@ -1196,39 +1186,31 @@ define <4 x float> @v_ashr_v8i16(<8 x i16> %value, <8 x i16> %amount) {
define amdgpu_ps <4 x i32> @s_ashr_v8i16(<8 x i16> inreg %value, <8 x i16> inreg %amount) {
; GFX6-LABEL: s_ashr_v8i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_mov_b32 s16, 0xffff
; GFX6-NEXT: s_and_b32 s8, s8, s16
; GFX6-NEXT: s_sext_i32_i16 s0, s0
; GFX6-NEXT: s_ashr_i32 s0, s0, s8
; GFX6-NEXT: s_and_b32 s8, s9, s16
; GFX6-NEXT: s_sext_i32_i16 s1, s1
; GFX6-NEXT: s_ashr_i32 s1, s1, s8
; GFX6-NEXT: s_and_b32 s8, s10, s16
; GFX6-NEXT: s_mov_b32 s16, 0xffff
; GFX6-NEXT: s_sext_i32_i16 s0, s0
; GFX6-NEXT: s_ashr_i32 s1, s1, s9
; GFX6-NEXT: s_ashr_i32 s0, s0, s8
; GFX6-NEXT: s_sext_i32_i16 s2, s2
; GFX6-NEXT: s_ashr_i32 s2, s2, s8
; GFX6-NEXT: s_and_b32 s8, s11, s16
; GFX6-NEXT: s_sext_i32_i16 s3, s3
; GFX6-NEXT: s_ashr_i32 s3, s3, s8
; GFX6-NEXT: s_and_b32 s8, s12, s16
; GFX6-NEXT: s_sext_i32_i16 s4, s4
; GFX6-NEXT: s_ashr_i32 s4, s4, s8
; GFX6-NEXT: s_and_b32 s8, s13, s16
; GFX6-NEXT: s_sext_i32_i16 s5, s5
; GFX6-NEXT: s_and_b32 s1, s1, s16
; GFX6-NEXT: s_ashr_i32 s5, s5, s8
; GFX6-NEXT: s_and_b32 s8, s14, s16
; GFX6-NEXT: s_sext_i32_i16 s6, s6
; GFX6-NEXT: s_ashr_i32 s2, s2, s10
; GFX6-NEXT: s_ashr_i32 s3, s3, s11
; GFX6-NEXT: s_sext_i32_i16 s5, s5
; GFX6-NEXT: s_and_b32 s0, s0, s16
; GFX6-NEXT: s_lshl_b32 s1, s1, 16
; GFX6-NEXT: s_ashr_i32 s6, s6, s8
; GFX6-NEXT: s_and_b32 s8, s15, s16
; GFX6-NEXT: s_sext_i32_i16 s4, s4
; GFX6-NEXT: s_ashr_i32 s5, s5, s13
; GFX6-NEXT: s_sext_i32_i16 s7, s7
; GFX6-NEXT: s_or_b32 s0, s0, s1
; GFX6-NEXT: s_and_b32 s1, s2, s16
; GFX6-NEXT: s_and_b32 s2, s3, s16
; GFX6-NEXT: s_ashr_i32 s7, s7, s8
; GFX6-NEXT: s_ashr_i32 s4, s4, s12
; GFX6-NEXT: s_sext_i32_i16 s6, s6
; GFX6-NEXT: s_ashr_i32 s7, s7, s15
; GFX6-NEXT: s_lshl_b32 s2, s2, 16
; GFX6-NEXT: s_and_b32 s3, s5, s16
; GFX6-NEXT: s_ashr_i32 s6, s6, s14
; GFX6-NEXT: s_or_b32 s1, s1, s2
; GFX6-NEXT: s_and_b32 s2, s4, s16
; GFX6-NEXT: s_lshl_b32 s3, s3, 16
@@ -2898,20 +2898,24 @@ define i16 @v_fshl_i16(i16 %lhs, i16 %rhs, i16 %amt) {
; GFX8-LABEL: v_fshl_i16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_xor_b32_e32 v3, -1, v2
; GFX8-NEXT: v_and_b32_e32 v3, 15, v2
; GFX8-NEXT: v_xor_b32_e32 v2, -1, v2
; GFX8-NEXT: v_and_b32_e32 v2, 15, v2
; GFX8-NEXT: v_lshrrev_b16_e32 v1, 1, v1
; GFX8-NEXT: v_lshlrev_b16_e32 v0, v2, v0
; GFX8-NEXT: v_lshrrev_b16_e32 v1, v3, v1
; GFX8-NEXT: v_lshlrev_b16_e32 v0, v3, v0
; GFX8-NEXT: v_lshrrev_b16_e32 v1, v2, v1
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_fshl_i16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_xor_b32_e32 v3, -1, v2
; GFX9-NEXT: v_and_b32_e32 v3, 15, v2
; GFX9-NEXT: v_xor_b32_e32 v2, -1, v2
; GFX9-NEXT: v_and_b32_e32 v2, 15, v2
; GFX9-NEXT: v_lshrrev_b16_e32 v1, 1, v1
; GFX9-NEXT: v_lshlrev_b16_e32 v0, v2, v0
; GFX9-NEXT: v_lshrrev_b16_e32 v1, v3, v1
; GFX9-NEXT: v_lshlrev_b16_e32 v0, v3, v0
; GFX9-NEXT: v_lshrrev_b16_e32 v1, v2, v1
; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -2920,7 +2924,9 @@ define i16 @v_fshl_i16(i16 %lhs, i16 %rhs, i16 %amt) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: v_xor_b32_e32 v3, -1, v2
; GFX10-NEXT: v_and_b32_e32 v2, 15, v2
; GFX10-NEXT: v_lshrrev_b16 v1, 1, v1
; GFX10-NEXT: v_and_b32_e32 v3, 15, v3
; GFX10-NEXT: v_lshlrev_b16 v0, v2, v0
; GFX10-NEXT: v_lshrrev_b16 v1, v3, v1
; GFX10-NEXT: v_or_b32_e32 v0, v0, v1
@@ -3019,33 +3025,39 @@ define amdgpu_ps half @v_fshl_i16_ssv(i16 inreg %lhs, i16 inreg %rhs, i16 %amt)
;
; GFX8-LABEL: v_fshl_i16_ssv:
; GFX8: ; %bb.0:
; GFX8-NEXT: v_xor_b32_e32 v1, -1, v0
; GFX8-NEXT: v_lshlrev_b16_e64 v0, v0, s0
; GFX8-NEXT: v_and_b32_e32 v1, 15, v0
; GFX8-NEXT: v_xor_b32_e32 v0, -1, v0
; GFX8-NEXT: v_lshlrev_b16_e64 v1, v1, s0
; GFX8-NEXT: s_bfe_u32 s0, s1, 0x100000
; GFX8-NEXT: s_bfe_u32 s1, 1, 0x100000
; GFX8-NEXT: v_and_b32_e32 v0, 15, v0
; GFX8-NEXT: s_lshr_b32 s0, s0, s1
; GFX8-NEXT: v_lshrrev_b16_e64 v1, v1, s0
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: v_lshrrev_b16_e64 v0, v0, s0
; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: v_fshl_i16_ssv:
; GFX9: ; %bb.0:
; GFX9-NEXT: v_xor_b32_e32 v1, -1, v0
; GFX9-NEXT: v_lshlrev_b16_e64 v0, v0, s0
; GFX9-NEXT: v_and_b32_e32 v1, 15, v0
; GFX9-NEXT: v_xor_b32_e32 v0, -1, v0
; GFX9-NEXT: v_lshlrev_b16_e64 v1, v1, s0
; GFX9-NEXT: s_bfe_u32 s0, s1, 0x100000
; GFX9-NEXT: s_bfe_u32 s1, 1, 0x100000
; GFX9-NEXT: v_and_b32_e32 v0, 15, v0
; GFX9-NEXT: s_lshr_b32 s0, s0, s1
; GFX9-NEXT: v_lshrrev_b16_e64 v1, v1, s0
; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
; GFX9-NEXT: v_lshrrev_b16_e64 v0, v0, s0
; GFX9-NEXT: v_or_b32_e32 v0, v1, v0
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: v_fshl_i16_ssv:
; GFX10: ; %bb.0:
; GFX10-NEXT: v_xor_b32_e32 v1, -1, v0
; GFX10-NEXT: v_and_b32_e32 v0, 15, v0
; GFX10-NEXT: s_bfe_u32 s1, s1, 0x100000
; GFX10-NEXT: s_bfe_u32 s2, 1, 0x100000
; GFX10-NEXT: v_lshlrev_b16 v0, v0, s0
; GFX10-NEXT: s_lshr_b32 s1, s1, s2
; GFX10-NEXT: v_and_b32_e32 v1, 15, v1
; GFX10-NEXT: v_lshlrev_b16 v0, v0, s0
; GFX10-NEXT: v_lshrrev_b16 v1, v1, s1
; GFX10-NEXT: v_or_b32_e32 v0, v0, v1
; GFX10-NEXT: ; return to shader part epilog
@@ -3228,15 +3240,14 @@ define amdgpu_ps i32 @s_fshl_v2i16(<2 x i16> inreg %lhs, <2 x i16> inreg %rhs, <
; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s3
; GFX9-NEXT: s_lshr_b32 s3, s1, 16
; GFX9-NEXT: s_and_b32 s1, s1, s4
; GFX9-NEXT: s_lshr_b32 s1, s1, 1
; GFX9-NEXT: s_lshr_b32 s1, s1, 0x10001
; GFX9-NEXT: s_lshr_b32 s3, s3, 1
; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s3
; GFX9-NEXT: s_lshr_b32 s3, s1, 16
; GFX9-NEXT: s_and_b32 s1, s1, s4
; GFX9-NEXT: s_lshr_b32 s5, s2, 16
; GFX9-NEXT: s_and_b32 s2, s2, s4
; GFX9-NEXT: s_lshr_b32 s4, s2, 16
; GFX9-NEXT: s_lshr_b32 s1, s1, s2
; GFX9-NEXT: s_lshr_b32 s2, s3, s5
; GFX9-NEXT: s_lshr_b32 s2, s3, s4
; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s2
; GFX9-NEXT: s_or_b32 s0, s0, s1
; GFX9-NEXT: ; return to shader part epilog
@@ -3247,7 +3258,7 @@ define amdgpu_ps i32 @s_fshl_v2i16(<2 x i16> inreg %lhs, <2 x i16> inreg %rhs, <
; GFX10-NEXT: s_mov_b32 s3, 0xf000f
; GFX10-NEXT: s_and_b32 s7, s1, s5
; GFX10-NEXT: s_lshr_b32 s1, s1, 16
; GFX10-NEXT: s_lshr_b32 s7, s7, 1
; GFX10-NEXT: s_lshr_b32 s7, s7, 0x10001
; GFX10-NEXT: s_lshr_b32 s1, s1, 1
; GFX10-NEXT: s_and_b32 s4, s2, s3
; GFX10-NEXT: s_andn2_b32 s2, s3, s2
@@ -3257,11 +3268,10 @@ define amdgpu_ps i32 @s_fshl_v2i16(<2 x i16> inreg %lhs, <2 x i16> inreg %rhs, <
; GFX10-NEXT: s_lshl_b32 s0, s0, s4
; GFX10-NEXT: s_lshr_b32 s4, s1, 16
; GFX10-NEXT: s_and_b32 s1, s1, s5
; GFX10-NEXT: s_and_b32 s5, s2, s5
; GFX10-NEXT: s_lshr_b32 s2, s2, 16
; GFX10-NEXT: s_lshr_b32 s5, s2, 16
; GFX10-NEXT: s_lshl_b32 s3, s3, s6
; GFX10-NEXT: s_lshr_b32 s1, s1, s5
; GFX10-NEXT: s_lshr_b32 s2, s4, s2
; GFX10-NEXT: s_lshr_b32 s1, s1, s2
; GFX10-NEXT: s_lshr_b32 s2, s4, s5
; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s3
; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s2
; GFX10-NEXT: s_or_b32 s0, s0, s1
@@ -3298,17 +3308,21 @@ define <2 x i16> @v_fshl_v2i16(<2 x i16> %lhs, <2 x i16> %rhs, <2 x i16> %amt) {
; GFX8-LABEL: v_fshl_v2i16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_xor_b32_e32 v4, -1, v2
; GFX8-NEXT: v_lshrrev_b16_e32 v5, 1, v1
; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; GFX8-NEXT: v_lshlrev_b16_e32 v2, v2, v0
; GFX8-NEXT: v_lshrrev_b16_e32 v4, v4, v5
; GFX8-NEXT: v_or_b32_e32 v2, v2, v4
; GFX8-NEXT: v_xor_b32_e32 v4, -1, v3
; GFX8-NEXT: v_lshlrev_b16_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_mov_b32_e32 v3, 1
; GFX8-NEXT: v_lshrrev_b16_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_lshrrev_b16_e32 v1, v4, v1
; GFX8-NEXT: v_and_b32_e32 v4, 15, v2
; GFX8-NEXT: v_xor_b32_e32 v2, -1, v2
; GFX8-NEXT: v_and_b32_e32 v2, 15, v2
; GFX8-NEXT: v_lshrrev_b16_e32 v5, 1, v1
; GFX8-NEXT: v_lshlrev_b16_e32 v4, v4, v0
; GFX8-NEXT: v_lshrrev_b16_e32 v2, v2, v5
; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
; GFX8-NEXT: v_and_b32_e32 v4, 15, v3
; GFX8-NEXT: v_xor_b32_e32 v3, -1, v3
; GFX8-NEXT: v_lshlrev_b16_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_mov_b32_e32 v4, 1
; GFX8-NEXT: v_and_b32_e32 v3, 15, v3
; GFX8-NEXT: v_lshrrev_b16_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_lshrrev_b16_e32 v1, v3, v1
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: v_mov_b32_e32 v1, 16
; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
@@ -3431,21 +3445,25 @@ define amdgpu_ps float @v_fshl_v2i16_ssv(<2 x i16> inreg %lhs, <2 x i16> inreg %
;
; GFX8-LABEL: v_fshl_v2i16_ssv:
; GFX8: ; %bb.0:
; GFX8-NEXT: v_and_b32_e32 v2, 15, v0
; GFX8-NEXT: s_lshr_b32 s2, s0, 16
; GFX8-NEXT: s_lshr_b32 s3, s1, 16
; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; GFX8-NEXT: v_xor_b32_e32 v2, -1, v0
; GFX8-NEXT: v_lshlrev_b16_e64 v0, v0, s0
; GFX8-NEXT: v_xor_b32_e32 v0, -1, v0
; GFX8-NEXT: v_lshlrev_b16_e64 v2, v2, s0
; GFX8-NEXT: s_bfe_u32 s0, s1, 0x100000
; GFX8-NEXT: s_bfe_u32 s1, 1, 0x100000
; GFX8-NEXT: v_and_b32_e32 v0, 15, v0
; GFX8-NEXT: s_lshr_b32 s0, s0, s1
; GFX8-NEXT: v_lshrrev_b16_e64 v2, v2, s0
; GFX8-NEXT: v_or_b32_e32 v0, v0, v2
; GFX8-NEXT: v_xor_b32_e32 v2, -1, v1
; GFX8-NEXT: v_lshrrev_b16_e64 v0, v0, s0
; GFX8-NEXT: v_or_b32_e32 v0, v2, v0
; GFX8-NEXT: v_and_b32_e32 v2, 15, v1
; GFX8-NEXT: v_xor_b32_e32 v1, -1, v1
; GFX8-NEXT: v_and_b32_e32 v1, 15, v1
; GFX8-NEXT: s_lshr_b32 s0, s3, s1
; GFX8-NEXT: v_lshlrev_b16_e64 v1, v1, s2
; GFX8-NEXT: v_lshrrev_b16_e64 v2, v2, s0
; GFX8-NEXT: v_or_b32_e32 v1, v1, v2
; GFX8-NEXT: v_lshlrev_b16_e64 v2, v2, s2
; GFX8-NEXT: v_lshrrev_b16_e64 v1, v1, s0
; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
; GFX8-NEXT: v_mov_b32_e32 v2, 16
; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -3459,7 +3477,7 @@ define amdgpu_ps float @v_fshl_v2i16_ssv(<2 x i16> inreg %lhs, <2 x i16> inreg %
; GFX9-NEXT: s_lshr_b32 s0, s1, 16
; GFX9-NEXT: s_and_b32 s1, s1, 0xffff
; GFX9-NEXT: v_xor_b32_e32 v0, -1, v0
; GFX9-NEXT: s_lshr_b32 s1, s1, 1
; GFX9-NEXT: s_lshr_b32 s1, s1, 0x10001
; GFX9-NEXT: s_lshr_b32 s0, s0, 1
; GFX9-NEXT: v_and_b32_e32 v0, s2, v0
; GFX9-NEXT: s_pack_ll_b32_b16 s0, s1, s0
@@ -3475,7 +3493,7 @@ define amdgpu_ps float @v_fshl_v2i16_ssv(<2 x i16> inreg %lhs, <2 x i16> inreg %
; GFX10-NEXT: s_and_b32 s1, s1, 0xffff
; GFX10-NEXT: v_and_b32_e32 v0, s2, v0
; GFX10-NEXT: v_and_b32_e32 v1, s2, v1
; GFX10-NEXT: s_lshr_b32 s1, s1, 1
; GFX10-NEXT: s_lshr_b32 s1, s1, 0x10001
; GFX10-NEXT: s_lshr_b32 s2, s3, 1
; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s2
; GFX10-NEXT: v_pk_lshlrev_b16 v0, v0, s0
@@ -3631,15 +3649,14 @@ define amdgpu_ps float @v_fshl_v2i16_vss(<2 x i16> %lhs, <2 x i16> inreg %rhs, <
; GFX9-NEXT: s_andn2_b32 s1, s2, s1
; GFX9-NEXT: s_lshr_b32 s2, s0, 16
; GFX9-NEXT: s_and_b32 s0, s0, s3
; GFX9-NEXT: s_lshr_b32 s0, s0, 1
; GFX9-NEXT: s_lshr_b32 s0, s0, 0x10001
; GFX9-NEXT: s_lshr_b32 s2, s2, 1
; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s2
; GFX9-NEXT: s_lshr_b32 s2, s0, 16
; GFX9-NEXT: s_and_b32 s0, s0, s3
; GFX9-NEXT: s_lshr_b32 s4, s1, 16
; GFX9-NEXT: s_and_b32 s1, s1, s3
; GFX9-NEXT: s_lshr_b32 s3, s1, 16
; GFX9-NEXT: s_lshr_b32 s0, s0, s1
; GFX9-NEXT: s_lshr_b32 s1, s2, s4
; GFX9-NEXT: s_lshr_b32 s1, s2, s3
; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX9-NEXT: v_or_b32_e32 v0, s0, v0
; GFX9-NEXT: ; return to shader part epilog
@@ -3650,7 +3667,7 @@ define amdgpu_ps float @v_fshl_v2i16_vss(<2 x i16> %lhs, <2 x i16> inreg %rhs, <
; GFX10-NEXT: s_mov_b32 s2, 0xf000f
; GFX10-NEXT: s_and_b32 s5, s0, s3
; GFX10-NEXT: s_lshr_b32 s0, s0, 16
; GFX10-NEXT: s_lshr_b32 s5, s5, 1
; GFX10-NEXT: s_lshr_b32 s5, s5, 0x10001
; GFX10-NEXT: s_lshr_b32 s0, s0, 1
; GFX10-NEXT: s_and_b32 s4, s1, s2
; GFX10-NEXT: s_andn2_b32 s1, s2, s1
@@ -3658,10 +3675,9 @@ define amdgpu_ps float @v_fshl_v2i16_vss(<2 x i16> %lhs, <2 x i16> inreg %rhs, <
; GFX10-NEXT: v_pk_lshlrev_b16 v0, s4, v0
; GFX10-NEXT: s_lshr_b32 s2, s0, 16
; GFX10-NEXT: s_and_b32 s0, s0, s3
; GFX10-NEXT: s_and_b32 s3, s1, s3
; GFX10-NEXT: s_lshr_b32 s1, s1, 16
; GFX10-NEXT: s_lshr_b32 s0, s0, s3
; GFX10-NEXT: s_lshr_b32 s1, s2, s1
; GFX10-NEXT: s_lshr_b32 s3, s1, 16
; GFX10-NEXT: s_lshr_b32 s0, s0, s1
; GFX10-NEXT: s_lshr_b32 s1, s2, s3
; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX10-NEXT: v_or_b32_e32 v0, s0, v0
; GFX10-NEXT: ; return to shader part epilog
@@ -3787,24 +3803,24 @@ define amdgpu_ps <2 x i32> @s_fshl_v4i16(<4 x i16> inreg %lhs, <4 x i16> inreg %
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s6, 0xf000f
; GFX9-NEXT: s_and_b32 s7, s4, s6
; GFX9-NEXT: s_lshr_b32 s8, s0, 16
; GFX9-NEXT: s_lshr_b32 s9, s7, 16
; GFX9-NEXT: s_lshr_b32 s9, s0, 16
; GFX9-NEXT: s_lshr_b32 s10, s7, 16
; GFX9-NEXT: s_lshl_b32 s0, s0, s7
; GFX9-NEXT: s_lshl_b32 s7, s8, s9
; GFX9-NEXT: s_mov_b32 s8, 0xffff
; GFX9-NEXT: s_lshl_b32 s7, s9, s10
; GFX9-NEXT: s_mov_b32 s9, 0xffff
; GFX9-NEXT: s_mov_b32 s8, 0x10001
; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s7
; GFX9-NEXT: s_lshr_b32 s7, s2, 16
; GFX9-NEXT: s_and_b32 s2, s2, s8
; GFX9-NEXT: s_lshr_b32 s2, s2, 1
; GFX9-NEXT: s_and_b32 s2, s2, s9
; GFX9-NEXT: s_lshr_b32 s2, s2, s8
; GFX9-NEXT: s_lshr_b32 s7, s7, 1
; GFX9-NEXT: s_andn2_b32 s4, s6, s4
; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX9-NEXT: s_lshr_b32 s7, s2, 16
; GFX9-NEXT: s_and_b32 s2, s2, s8
; GFX9-NEXT: s_lshr_b32 s9, s4, 16
; GFX9-NEXT: s_and_b32 s4, s4, s8
; GFX9-NEXT: s_and_b32 s2, s2, s9
; GFX9-NEXT: s_lshr_b32 s10, s4, 16
; GFX9-NEXT: s_lshr_b32 s2, s2, s4
; GFX9-NEXT: s_lshr_b32 s4, s7, s9
; GFX9-NEXT: s_lshr_b32 s4, s7, s10
; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s4
; GFX9-NEXT: s_or_b32 s0, s0, s2
; GFX9-NEXT: s_and_b32 s2, s5, s6
@@ -3815,14 +3831,13 @@ define amdgpu_ps <2 x i32> @s_fshl_v4i16(<4 x i16> inreg %lhs, <4 x i16> inreg %
; GFX9-NEXT: s_lshl_b32 s2, s5, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s2
; GFX9-NEXT: s_lshr_b32 s2, s3, 16
; GFX9-NEXT: s_and_b32 s3, s3, s8
; GFX9-NEXT: s_lshr_b32 s3, s3, 1
; GFX9-NEXT: s_and_b32 s3, s3, s9
; GFX9-NEXT: s_lshr_b32 s3, s3, s8
; GFX9-NEXT: s_lshr_b32 s2, s2, 1
; GFX9-NEXT: s_pack_ll_b32_b16 s2, s3, s2
; GFX9-NEXT: s_lshr_b32 s3, s2, 16
; GFX9-NEXT: s_and_b32 s2, s2, s8
; GFX9-NEXT: s_and_b32 s2, s2, s9
; GFX9-NEXT: s_lshr_b32 s5, s4, 16
; GFX9-NEXT: s_and_b32 s4, s4, s8
; GFX9-NEXT: s_lshr_b32 s2, s2, s4
; GFX9-NEXT: s_lshr_b32 s3, s3, s5
; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s3
@@ -3831,30 +3846,30 @@ define amdgpu_ps <2 x i32> @s_fshl_v4i16(<4 x i16> inreg %lhs, <4 x i16> inreg %
;
; GFX10-LABEL: s_fshl_v4i16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_mov_b32 s9, 0xffff
; GFX10-NEXT: s_mov_b32 s10, 0xffff
; GFX10-NEXT: s_mov_b32 s6, 0xf000f
; GFX10-NEXT: s_and_b32 s11, s2, s9
; GFX10-NEXT: s_mov_b32 s8, 0x10001
; GFX10-NEXT: s_and_b32 s12, s2, s10
; GFX10-NEXT: s_lshr_b32 s2, s2, 16
; GFX10-NEXT: s_and_b32 s7, s4, s6
; GFX10-NEXT: s_lshr_b32 s11, s11, 1
; GFX10-NEXT: s_lshr_b32 s12, s12, s8
; GFX10-NEXT: s_lshr_b32 s2, s2, 1
; GFX10-NEXT: s_andn2_b32 s4, s6, s4
; GFX10-NEXT: s_lshr_b32 s8, s0, 16
; GFX10-NEXT: s_lshr_b32 s10, s7, 16
; GFX10-NEXT: s_pack_ll_b32_b16 s2, s11, s2
; GFX10-NEXT: s_lshr_b32 s9, s0, 16
; GFX10-NEXT: s_lshr_b32 s11, s7, 16
; GFX10-NEXT: s_pack_ll_b32_b16 s2, s12, s2
; GFX10-NEXT: s_lshl_b32 s0, s0, s7
; GFX10-NEXT: s_lshl_b32 s7, s8, s10
; GFX10-NEXT: s_lshr_b32 s8, s2, 16
; GFX10-NEXT: s_and_b32 s2, s2, s9
; GFX10-NEXT: s_and_b32 s10, s4, s9
; GFX10-NEXT: s_lshr_b32 s4, s4, 16
; GFX10-NEXT: s_lshr_b32 s2, s2, s10
; GFX10-NEXT: s_lshr_b32 s4, s8, s4
; GFX10-NEXT: s_and_b32 s8, s3, s9
; GFX10-NEXT: s_lshl_b32 s7, s9, s11
; GFX10-NEXT: s_lshr_b32 s9, s2, 16
; GFX10-NEXT: s_and_b32 s2, s2, s10
; GFX10-NEXT: s_lshr_b32 s11, s4, 16
; GFX10-NEXT: s_lshr_b32 s2, s2, s4
; GFX10-NEXT: s_lshr_b32 s4, s9, s11
; GFX10-NEXT: s_and_b32 s9, s3, s10
; GFX10-NEXT: s_lshr_b32 s3, s3, 16
; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s4
; GFX10-NEXT: s_and_b32 s4, s5, s6
; GFX10-NEXT: s_lshr_b32 s8, s8, 1
; GFX10-NEXT: s_lshr_b32 s8, s9, s8
; GFX10-NEXT: s_lshr_b32 s3, s3, 1
; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s7
; GFX10-NEXT: s_andn2_b32 s5, s6, s5
@@ -3864,11 +3879,10 @@ define amdgpu_ps <2 x i32> @s_fshl_v4i16(<4 x i16> inreg %lhs, <4 x i16> inreg %
; GFX10-NEXT: s_lshl_b32 s1, s1, s4
; GFX10-NEXT: s_lshl_b32 s4, s6, s7
; GFX10-NEXT: s_lshr_b32 s6, s3, 16
; GFX10-NEXT: s_and_b32 s3, s3, s9
; GFX10-NEXT: s_and_b32 s7, s5, s9
; GFX10-NEXT: s_lshr_b32 s5, s5, 16
; GFX10-NEXT: s_lshr_b32 s3, s3, s7
; GFX10-NEXT: s_lshr_b32 s5, s6, s5
; GFX10-NEXT: s_and_b32 s3, s3, s10
; GFX10-NEXT: s_lshr_b32 s7, s5, 16
; GFX10-NEXT: s_lshr_b32 s3, s3, s5
; GFX10-NEXT: s_lshr_b32 s5, s6, s7
; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX10-NEXT: s_pack_ll_b32_b16 s3, s3, s5
; GFX10-NEXT: s_or_b32 s0, s0, s2
@@ -3924,28 +3938,37 @@ define <4 x half> @v_fshl_v4i16(<4 x i16> %lhs, <4 x i16> %rhs, <4 x i16> %amt)
; GFX8-LABEL: v_fshl_v4i16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_xor_b32_e32 v8, -1, v4
; GFX8-NEXT: v_lshrrev_b16_e32 v9, 1, v2
; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v4
; GFX8-NEXT: v_lshlrev_b16_e32 v4, v4, v0
; GFX8-NEXT: v_lshrrev_b16_e32 v8, v8, v9
; GFX8-NEXT: v_or_b32_e32 v4, v4, v8
; GFX8-NEXT: v_xor_b32_e32 v8, -1, v6
; GFX8-NEXT: v_lshlrev_b16_sdwa v0, v6, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_mov_b32_e32 v6, 1
; GFX8-NEXT: v_lshrrev_b16_sdwa v2, v6, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_lshrrev_b16_e32 v2, v8, v2
; GFX8-NEXT: v_or_b32_e32 v0, v0, v2
; GFX8-NEXT: v_xor_b32_e32 v2, -1, v5
; GFX8-NEXT: v_lshrrev_b16_e32 v8, 1, v3
; GFX8-NEXT: v_and_b32_e32 v8, 15, v4
; GFX8-NEXT: v_xor_b32_e32 v4, -1, v4
; GFX8-NEXT: v_and_b32_e32 v4, 15, v4
; GFX8-NEXT: v_lshrrev_b16_e32 v9, 1, v2
; GFX8-NEXT: v_lshlrev_b16_e32 v8, v8, v0
; GFX8-NEXT: v_lshrrev_b16_e32 v4, v4, v9
; GFX8-NEXT: v_or_b32_e32 v4, v8, v4
; GFX8-NEXT: v_and_b32_e32 v8, 15, v6
; GFX8-NEXT: v_xor_b32_e32 v6, -1, v6
; GFX8-NEXT: v_lshlrev_b16_sdwa v0, v8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_mov_b32_e32 v8, 1
; GFX8-NEXT: v_and_b32_e32 v6, 15, v6
; GFX8-NEXT: v_lshrrev_b16_sdwa v2, v8, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_lshrrev_b16_e32 v2, v6, v2
; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v5
; GFX8-NEXT: v_lshlrev_b16_e32 v5, v5, v1
; GFX8-NEXT: v_lshrrev_b16_e32 v2, v2, v8
; GFX8-NEXT: v_or_b32_e32 v2, v5, v2
; GFX8-NEXT: v_xor_b32_e32 v5, -1, v7
; GFX8-NEXT: v_lshrrev_b16_sdwa v3, v6, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_lshlrev_b16_sdwa v1, v7, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_lshrrev_b16_e32 v3, v5, v3
; GFX8-NEXT: v_or_b32_e32 v0, v0, v2
; GFX8-NEXT: v_and_b32_e32 v2, 15, v5
; GFX8-NEXT: v_xor_b32_e32 v5, -1, v5
; GFX8-NEXT: v_and_b32_e32 v5, 15, v5
; GFX8-NEXT: v_lshrrev_b16_e32 v6, 1, v3
; GFX8-NEXT: v_lshlrev_b16_e32 v2, v2, v1
; GFX8-NEXT: v_lshrrev_b16_e32 v5, v5, v6
; GFX8-NEXT: v_or_b32_e32 v2, v2, v5
; GFX8-NEXT: v_and_b32_e32 v5, 15, v7
; GFX8-NEXT: v_xor_b32_e32 v6, -1, v7
; GFX8-NEXT: v_lshlrev_b16_sdwa v1, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_mov_b32_e32 v5, 1
; GFX8-NEXT: v_and_b32_e32 v6, 15, v6
; GFX8-NEXT: v_lshrrev_b16_sdwa v3, v5, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_lshrrev_b16_e32 v3, v6, v3
; GFX8-NEXT: v_or_b32_e32 v1, v1, v3
; GFX8-NEXT: v_mov_b32_e32 v3, 16
; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
@@ -2751,20 +2751,24 @@ define i16 @v_fshr_i16(i16 %lhs, i16 %rhs, i16 %amt) {
; GFX8-LABEL: v_fshr_i16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_xor_b32_e32 v3, -1, v2
; GFX8-NEXT: v_and_b32_e32 v3, 15, v2
; GFX8-NEXT: v_xor_b32_e32 v2, -1, v2
; GFX8-NEXT: v_and_b32_e32 v2, 15, v2
; GFX8-NEXT: v_lshlrev_b16_e32 v0, 1, v0
; GFX8-NEXT: v_lshlrev_b16_e32 v0, v3, v0
; GFX8-NEXT: v_lshrrev_b16_e32 v1, v2, v1
; GFX8-NEXT: v_lshlrev_b16_e32 v0, v2, v0
; GFX8-NEXT: v_lshrrev_b16_e32 v1, v3, v1
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_fshr_i16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_xor_b32_e32 v3, -1, v2
; GFX9-NEXT: v_and_b32_e32 v3, 15, v2
; GFX9-NEXT: v_xor_b32_e32 v2, -1, v2
; GFX9-NEXT: v_and_b32_e32 v2, 15, v2
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 1, v0
; GFX9-NEXT: v_lshlrev_b16_e32 v0, v3, v0
; GFX9-NEXT: v_lshrrev_b16_e32 v1, v2, v1
; GFX9-NEXT: v_lshlrev_b16_e32 v0, v2, v0
; GFX9-NEXT: v_lshrrev_b16_e32 v1, v3, v1
; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -2774,6 +2778,8 @@ define i16 @v_fshr_i16(i16 %lhs, i16 %rhs, i16 %amt) {
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: v_xor_b32_e32 v3, -1, v2
; GFX10-NEXT: v_lshlrev_b16 v0, 1, v0
; GFX10-NEXT: v_and_b32_e32 v2, 15, v2
; GFX10-NEXT: v_and_b32_e32 v3, 15, v3
; GFX10-NEXT: v_lshrrev_b16 v1, v2, v1
; GFX10-NEXT: v_lshlrev_b16 v0, v3, v0
; GFX10-NEXT: v_or_b32_e32 v0, v0, v1
@@ -2873,30 +2879,36 @@ define amdgpu_ps half @v_fshr_i16_ssv(i16 inreg %lhs, i16 inreg %rhs, i16 %amt)
;
; GFX8-LABEL: v_fshr_i16_ssv:
; GFX8: ; %bb.0:
; GFX8-NEXT: v_and_b32_e32 v1, 15, v0
; GFX8-NEXT: v_xor_b32_e32 v0, -1, v0
; GFX8-NEXT: s_bfe_u32 s2, 1, 0x100000
; GFX8-NEXT: v_xor_b32_e32 v1, -1, v0
; GFX8-NEXT: v_and_b32_e32 v0, 15, v0
; GFX8-NEXT: s_lshl_b32 s0, s0, s2
; GFX8-NEXT: v_lshlrev_b16_e64 v1, v1, s0
; GFX8-NEXT: v_lshrrev_b16_e64 v0, v0, s1
; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
; GFX8-NEXT: v_lshlrev_b16_e64 v0, v0, s0
; GFX8-NEXT: v_lshrrev_b16_e64 v1, v1, s1
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: v_fshr_i16_ssv:
; GFX9: ; %bb.0:
; GFX9-NEXT: v_and_b32_e32 v1, 15, v0
; GFX9-NEXT: v_xor_b32_e32 v0, -1, v0
; GFX9-NEXT: s_bfe_u32 s2, 1, 0x100000
; GFX9-NEXT: v_xor_b32_e32 v1, -1, v0
; GFX9-NEXT: v_and_b32_e32 v0, 15, v0
; GFX9-NEXT: s_lshl_b32 s0, s0, s2
; GFX9-NEXT: v_lshlrev_b16_e64 v1, v1, s0
; GFX9-NEXT: v_lshrrev_b16_e64 v0, v0, s1
; GFX9-NEXT: v_or_b32_e32 v0, v1, v0
; GFX9-NEXT: v_lshlrev_b16_e64 v0, v0, s0
; GFX9-NEXT: v_lshrrev_b16_e64 v1, v1, s1
; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: v_fshr_i16_ssv:
; GFX10: ; %bb.0:
; GFX10-NEXT: v_xor_b32_e32 v1, -1, v0
; GFX10-NEXT: v_and_b32_e32 v0, 15, v0
; GFX10-NEXT: s_bfe_u32 s2, 1, 0x100000
; GFX10-NEXT: v_lshrrev_b16 v0, v0, s1
; GFX10-NEXT: s_lshl_b32 s0, s0, s2
; GFX10-NEXT: v_and_b32_e32 v1, 15, v1
; GFX10-NEXT: v_lshrrev_b16 v0, v0, s1
; GFX10-NEXT: v_lshlrev_b16 v1, v1, s0
; GFX10-NEXT: v_or_b32_e32 v0, v1, v0
; GFX10-NEXT: ; return to shader part epilog
@@ -3110,14 +3122,12 @@ define amdgpu_ps i32 @s_fshr_v2i16(<2 x i16> inreg %lhs, <2 x i16> inreg %rhs, <
; GFX9-NEXT: s_lshr_b32 s5, s2, 16
; GFX9-NEXT: s_lshl_b32 s0, s0, s2
; GFX9-NEXT: s_lshl_b32 s2, s3, s5
; GFX9-NEXT: s_mov_b32 s3, 0xffff
; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s2
; GFX9-NEXT: s_lshr_b32 s2, s1, 16
; GFX9-NEXT: s_and_b32 s1, s1, s3
; GFX9-NEXT: s_lshr_b32 s5, s4, 16
; GFX9-NEXT: s_and_b32 s3, s4, s3
; GFX9-NEXT: s_lshr_b32 s1, s1, s3
; GFX9-NEXT: s_lshr_b32 s2, s2, s5
; GFX9-NEXT: s_and_b32 s1, s1, 0xffff
; GFX9-NEXT: s_lshr_b32 s3, s4, 16
; GFX9-NEXT: s_lshr_b32 s1, s1, s4
; GFX9-NEXT: s_lshr_b32 s2, s2, s3
; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s2
; GFX9-NEXT: s_or_b32 s0, s0, s1
; GFX9-NEXT: ; return to shader part epilog
@@ -3135,13 +3145,11 @@ define amdgpu_ps i32 @s_fshr_v2i16(<2 x i16> inreg %lhs, <2 x i16> inreg %rhs, <
; GFX10-NEXT: s_lshr_b32 s4, s2, 16
; GFX10-NEXT: s_lshl_b32 s0, s0, s2
; GFX10-NEXT: s_lshl_b32 s2, s3, s4
; GFX10-NEXT: s_mov_b32 s3, 0xffff
; GFX10-NEXT: s_lshr_b32 s4, s1, 16
; GFX10-NEXT: s_and_b32 s1, s1, s3
; GFX10-NEXT: s_and_b32 s3, s5, s3
; GFX10-NEXT: s_lshr_b32 s5, s5, 16
; GFX10-NEXT: s_lshr_b32 s1, s1, s3
; GFX10-NEXT: s_lshr_b32 s3, s4, s5
; GFX10-NEXT: s_lshr_b32 s3, s1, 16
; GFX10-NEXT: s_and_b32 s1, s1, 0xffff
; GFX10-NEXT: s_lshr_b32 s4, s5, 16
; GFX10-NEXT: s_lshr_b32 s1, s1, s5
; GFX10-NEXT: s_lshr_b32 s3, s3, s4
; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s2
; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s3
; GFX10-NEXT: s_or_b32 s0, s0, s1
@@ -3202,20 +3210,24 @@ define <2 x i16> @v_fshr_v2i16(<2 x i16> %lhs, <2 x i16> %rhs, <2 x i16> %amt) {
; GFX8-NEXT: v_mov_b32_e32 v5, 15
; GFX8-NEXT: v_lshlrev_b16_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_lshrrev_b16_sdwa v5, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_xor_b32_e32 v2, -1, v2
; GFX8-NEXT: v_or_b32_e32 v0, v0, v5
; GFX8-NEXT: v_lshlrev_b16_e32 v5, 1, v1
; GFX8-NEXT: v_xor_b32_e32 v2, -1, v2
; GFX8-NEXT: v_lshlrev_b16_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v2
; GFX8-NEXT: v_xor_b32_e32 v6, -1, v2
; GFX8-NEXT: v_lshlrev_b16_e32 v2, v2, v3
; GFX8-NEXT: v_lshrrev_b16_e32 v3, 1, v5
; GFX8-NEXT: v_lshrrev_b16_e32 v3, v6, v3
; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
; GFX8-NEXT: v_xor_b32_e32 v3, -1, v4
; GFX8-NEXT: v_and_b32_e32 v6, 15, v2
; GFX8-NEXT: v_xor_b32_e32 v2, -1, v2
; GFX8-NEXT: v_and_b32_e32 v2, 15, v2
; GFX8-NEXT: v_lshrrev_b16_e32 v5, 1, v5
; GFX8-NEXT: v_lshlrev_b16_e32 v3, v6, v3
; GFX8-NEXT: v_lshrrev_b16_e32 v2, v2, v5
; GFX8-NEXT: v_or_b32_e32 v2, v3, v2
; GFX8-NEXT: v_and_b32_e32 v3, 15, v4
; GFX8-NEXT: v_xor_b32_e32 v4, -1, v4
; GFX8-NEXT: v_and_b32_e32 v4, 15, v4
; GFX8-NEXT: v_lshrrev_b16_e32 v1, 1, v1
; GFX8-NEXT: v_lshlrev_b16_e32 v0, v4, v0
; GFX8-NEXT: v_lshrrev_b16_e32 v1, v3, v1
; GFX8-NEXT: v_lshlrev_b16_e32 v0, v3, v0
; GFX8-NEXT: v_lshrrev_b16_e32 v1, v4, v1
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: v_mov_b32_e32 v1, 16
; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
@@ -3361,27 +3373,31 @@ define amdgpu_ps float @v_fshr_v2i16_ssv(<2 x i16> inreg %lhs, <2 x i16> inreg %
; GFX8-NEXT: s_lshr_b32 s2, s0, 16
; GFX8-NEXT: s_lshl_b32 s0, s0, s4
; GFX8-NEXT: s_lshr_b32 s5, s5, s6
; GFX8-NEXT: v_xor_b32_e32 v0, -1, v0
; GFX8-NEXT: s_lshr_b32 s3, s1, 16
; GFX8-NEXT: s_or_b32 s0, s0, s5
; GFX8-NEXT: s_lshl_b32 s1, s1, s4
; GFX8-NEXT: v_xor_b32_e32 v0, -1, v0
; GFX8-NEXT: v_and_b32_e32 v2, 15, v0
; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; GFX8-NEXT: v_xor_b32_e32 v2, -1, v0
; GFX8-NEXT: v_lshlrev_b16_e64 v0, v0, s0
; GFX8-NEXT: v_xor_b32_e32 v0, -1, v0
; GFX8-NEXT: v_lshlrev_b16_e64 v2, v2, s0
; GFX8-NEXT: s_bfe_u32 s0, s1, 0x100000
; GFX8-NEXT: v_and_b32_e32 v0, 15, v0
; GFX8-NEXT: s_lshr_b32 s0, s0, s4
; GFX8-NEXT: s_lshr_b32 s5, s3, s6
; GFX8-NEXT: s_lshl_b32 s3, s3, s4
; GFX8-NEXT: s_lshr_b32 s0, s0, s4
; GFX8-NEXT: v_lshrrev_b16_e64 v0, v0, s0
; GFX8-NEXT: s_lshl_b32 s2, s2, s4
; GFX8-NEXT: v_lshrrev_b16_e64 v2, v2, s0
; GFX8-NEXT: v_or_b32_e32 v0, v2, v0
; GFX8-NEXT: v_and_b32_e32 v2, 15, v1
; GFX8-NEXT: v_xor_b32_e32 v1, -1, v1
; GFX8-NEXT: s_bfe_u32 s0, s3, 0x100000
; GFX8-NEXT: s_or_b32 s2, s2, s5
; GFX8-NEXT: v_or_b32_e32 v0, v0, v2
; GFX8-NEXT: v_xor_b32_e32 v2, -1, v1
; GFX8-NEXT: v_and_b32_e32 v1, 15, v1
; GFX8-NEXT: s_lshr_b32 s0, s0, s4
; GFX8-NEXT: v_lshlrev_b16_e64 v1, v1, s2
; GFX8-NEXT: v_lshrrev_b16_e64 v2, v2, s0
; GFX8-NEXT: v_or_b32_e32 v1, v1, v2
; GFX8-NEXT: v_lshlrev_b16_e64 v2, v2, s2
; GFX8-NEXT: v_lshrrev_b16_e64 v1, v1, s0
; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
; GFX8-NEXT: v_mov_b32_e32 v2, 16
; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -3623,32 +3639,28 @@ define amdgpu_ps float @v_fshr_v2i16_vss(<2 x i16> %lhs, <2 x i16> inreg %rhs, <
; GFX9-NEXT: s_and_b32 s3, s1, s2
; GFX9-NEXT: s_andn2_b32 s1, s2, s1
; GFX9-NEXT: v_pk_lshlrev_b16 v0, 1, v0 op_sel_hi:[0,1]
; GFX9-NEXT: s_mov_b32 s2, 0xffff
; GFX9-NEXT: v_pk_lshlrev_b16 v0, s1, v0
; GFX9-NEXT: s_lshr_b32 s1, s0, 16
; GFX9-NEXT: s_and_b32 s0, s0, s2
; GFX9-NEXT: s_lshr_b32 s4, s3, 16
; GFX9-NEXT: s_and_b32 s2, s3, s2
; GFX9-NEXT: s_lshr_b32 s0, s0, s2
; GFX9-NEXT: s_lshr_b32 s1, s1, s4
; GFX9-NEXT: s_and_b32 s0, s0, 0xffff
; GFX9-NEXT: s_lshr_b32 s2, s3, 16
; GFX9-NEXT: s_lshr_b32 s0, s0, s3
; GFX9-NEXT: s_lshr_b32 s1, s1, s2
; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX9-NEXT: v_or_b32_e32 v0, s0, v0
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: v_fshr_v2i16_vss:
; GFX10: ; %bb.0:
; GFX10-NEXT: v_pk_lshlrev_b16 v0, 1, v0 op_sel_hi:[0,1]
; GFX10-NEXT: s_mov_b32 s2, 0xf000f
; GFX10-NEXT: s_mov_b32 s3, 0xffff
; GFX10-NEXT: s_and_b32 s4, s1, s2
; GFX10-NEXT: v_pk_lshlrev_b16 v0, 1, v0 op_sel_hi:[0,1]
; GFX10-NEXT: s_and_b32 s3, s1, s2
; GFX10-NEXT: s_andn2_b32 s1, s2, s1
; GFX10-NEXT: s_lshr_b32 s2, s0, 16
; GFX10-NEXT: s_and_b32 s0, s0, s3
; GFX10-NEXT: s_and_b32 s0, s0, 0xffff
; GFX10-NEXT: s_lshr_b32 s4, s3, 16
; GFX10-NEXT: v_pk_lshlrev_b16 v0, s1, v0
; GFX10-NEXT: s_and_b32 s1, s4, s3
; GFX10-NEXT: s_lshr_b32 s3, s4, 16
; GFX10-NEXT: s_lshr_b32 s0, s0, s1
; GFX10-NEXT: s_lshr_b32 s1, s2, s3
; GFX10-NEXT: s_lshr_b32 s0, s0, s3
; GFX10-NEXT: s_lshr_b32 s1, s2, s4
; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX10-NEXT: v_or_b32_e32 v0, s0, v0
; GFX10-NEXT: ; return to shader part epilog
@@ -3845,7 +3857,6 @@ define amdgpu_ps <2 x i32> @s_fshr_v4i16(<4 x i16> inreg %lhs, <4 x i16> inreg %
; GFX9-NEXT: s_lshr_b32 s4, s2, 16
; GFX9-NEXT: s_and_b32 s2, s2, s9
; GFX9-NEXT: s_lshr_b32 s10, s7, 16
; GFX9-NEXT: s_and_b32 s7, s7, s9
; GFX9-NEXT: s_lshr_b32 s2, s2, s7
; GFX9-NEXT: s_lshr_b32 s4, s4, s10
; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s4
@@ -3864,7 +3875,6 @@ define amdgpu_ps <2 x i32> @s_fshr_v4i16(<4 x i16> inreg %lhs, <4 x i16> inreg %
; GFX9-NEXT: s_lshr_b32 s4, s3, 16
; GFX9-NEXT: s_and_b32 s3, s3, s9
; GFX9-NEXT: s_lshr_b32 s5, s2, 16
; GFX9-NEXT: s_and_b32 s2, s2, s9
; GFX9-NEXT: s_lshr_b32 s2, s3, s2
; GFX9-NEXT: s_lshr_b32 s3, s4, s5
; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s3
@@ -3897,18 +3907,16 @@ define amdgpu_ps <2 x i32> @s_fshr_v4i16(<4 x i16> inreg %lhs, <4 x i16> inreg %
; GFX10-NEXT: s_lshr_b32 s6, s4, 16
; GFX10-NEXT: s_lshr_b32 s10, s2, 16
; GFX10-NEXT: s_and_b32 s2, s2, s8
; GFX10-NEXT: s_and_b32 s11, s9, s8
; GFX10-NEXT: s_lshr_b32 s9, s9, 16
; GFX10-NEXT: s_lshr_b32 s11, s9, 16
; GFX10-NEXT: s_lshl_b32 s1, s1, s4
; GFX10-NEXT: s_lshl_b32 s4, s5, s6
; GFX10-NEXT: s_lshr_b32 s5, s3, 16
; GFX10-NEXT: s_and_b32 s3, s3, s8
; GFX10-NEXT: s_and_b32 s6, s7, s8
; GFX10-NEXT: s_lshr_b32 s7, s7, 16
; GFX10-NEXT: s_lshr_b32 s2, s2, s11
; GFX10-NEXT: s_lshr_b32 s9, s10, s9
; GFX10-NEXT: s_lshr_b32 s3, s3, s6
; GFX10-NEXT: s_lshr_b32 s5, s5, s7
; GFX10-NEXT: s_lshr_b32 s6, s7, 16
; GFX10-NEXT: s_lshr_b32 s2, s2, s9
; GFX10-NEXT: s_lshr_b32 s9, s10, s11
; GFX10-NEXT: s_lshr_b32 s3, s3, s7
; GFX10-NEXT: s_lshr_b32 s5, s5, s6
; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s9
; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX10-NEXT: s_pack_ll_b32_b16 s3, s3, s5
@@ -4005,20 +4013,24 @@ define <4 x half> @v_fshr_v4i16(<4 x i16> %lhs, <4 x i16> %rhs, <4 x i16> %amt)
; GFX8-NEXT: v_mov_b32_e32 v8, 15
; GFX8-NEXT: v_lshlrev_b16_sdwa v0, v7, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_lshrrev_b16_sdwa v9, v8, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_xor_b32_e32 v4, -1, v4
; GFX8-NEXT: v_or_b32_e32 v0, v0, v9
; GFX8-NEXT: v_lshlrev_b16_e32 v9, 1, v2
; GFX8-NEXT: v_xor_b32_e32 v4, -1, v4
; GFX8-NEXT: v_lshlrev_b16_sdwa v2, v7, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v4
; GFX8-NEXT: v_xor_b32_e32 v10, -1, v4
; GFX8-NEXT: v_lshlrev_b16_e32 v4, v4, v6
; GFX8-NEXT: v_lshrrev_b16_e32 v6, 1, v9
; GFX8-NEXT: v_lshrrev_b16_e32 v6, v10, v6
; GFX8-NEXT: v_or_b32_e32 v4, v4, v6
; GFX8-NEXT: v_xor_b32_e32 v6, -1, v7
; GFX8-NEXT: v_and_b32_e32 v10, 15, v4
; GFX8-NEXT: v_xor_b32_e32 v4, -1, v4
; GFX8-NEXT: v_and_b32_e32 v4, 15, v4
; GFX8-NEXT: v_lshrrev_b16_e32 v9, 1, v9
; GFX8-NEXT: v_lshlrev_b16_e32 v6, v10, v6
; GFX8-NEXT: v_lshrrev_b16_e32 v4, v4, v9
; GFX8-NEXT: v_or_b32_e32 v4, v6, v4
; GFX8-NEXT: v_and_b32_e32 v6, 15, v7
; GFX8-NEXT: v_xor_b32_e32 v7, -1, v7
; GFX8-NEXT: v_and_b32_e32 v7, 15, v7
; GFX8-NEXT: v_lshrrev_b16_e32 v2, 1, v2
; GFX8-NEXT: v_lshlrev_b16_e32 v0, v7, v0
; GFX8-NEXT: v_lshrrev_b16_e32 v2, v6, v2
; GFX8-NEXT: v_lshlrev_b16_e32 v0, v6, v0
; GFX8-NEXT: v_lshrrev_b16_e32 v2, v7, v2
; GFX8-NEXT: v_or_b32_e32 v0, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v2, 16
; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
@@ -4029,20 +4041,24 @@ define <4 x half> @v_fshr_v4i16(<4 x i16> %lhs, <4 x i16> %rhs, <4 x i16> %amt)
; GFX8-NEXT: v_mov_b32_e32 v6, 1
; GFX8-NEXT: v_lshlrev_b16_sdwa v1, v6, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_lshrrev_b16_sdwa v7, v8, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_xor_b32_e32 v5, -1, v5
; GFX8-NEXT: v_or_b32_e32 v1, v1, v7
; GFX8-NEXT: v_lshlrev_b16_e32 v7, 1, v3
; GFX8-NEXT: v_xor_b32_e32 v5, -1, v5
; GFX8-NEXT: v_lshlrev_b16_sdwa v3, v6, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v5
; GFX8-NEXT: v_xor_b32_e32 v8, -1, v5
; GFX8-NEXT: v_lshlrev_b16_e32 v4, v5, v4
; GFX8-NEXT: v_lshrrev_b16_e32 v5, 1, v7
; GFX8-NEXT: v_lshrrev_b16_e32 v5, v8, v5
; GFX8-NEXT: v_and_b32_e32 v8, 15, v5
; GFX8-NEXT: v_xor_b32_e32 v5, -1, v5
; GFX8-NEXT: v_and_b32_e32 v5, 15, v5
; GFX8-NEXT: v_lshrrev_b16_e32 v7, 1, v7
; GFX8-NEXT: v_lshlrev_b16_e32 v4, v8, v4
; GFX8-NEXT: v_lshrrev_b16_e32 v5, v5, v7
; GFX8-NEXT: v_or_b32_e32 v4, v4, v5
; GFX8-NEXT: v_xor_b32_e32 v5, -1, v6
; GFX8-NEXT: v_and_b32_e32 v5, 15, v6
; GFX8-NEXT: v_xor_b32_e32 v6, -1, v6
; GFX8-NEXT: v_and_b32_e32 v6, 15, v6
; GFX8-NEXT: v_lshrrev_b16_e32 v3, 1, v3
; GFX8-NEXT: v_lshlrev_b16_e32 v1, v6, v1
; GFX8-NEXT: v_lshrrev_b16_e32 v3, v5, v3
; GFX8-NEXT: v_lshlrev_b16_e32 v1, v5, v1
; GFX8-NEXT: v_lshrrev_b16_e32 v3, v6, v3
; GFX8-NEXT: v_or_b32_e32 v1, v1, v3
; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; GFX8-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -71,35 +71,15 @@ define i8 @v_lshr_i8_7(i8 %value) {
}

define amdgpu_ps i8 @s_lshr_i8(i8 inreg %value, i8 inreg %amount) {
; GFX6-LABEL: s_lshr_i8:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_movk_i32 s2, 0xff
; GFX6-NEXT: s_and_b32 s1, s1, s2
; GFX6-NEXT: s_and_b32 s0, s0, s2
; GFX6-NEXT: s_lshr_b32 s0, s0, s1
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: s_lshr_i8:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_movk_i32 s2, 0xff
; GFX8-NEXT: s_and_b32 s0, s0, s2
; GFX8-NEXT: s_and_b32 s1, s1, s2
; GFX8-NEXT: s_lshr_b32 s0, s0, s1
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_lshr_i8:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_movk_i32 s2, 0xff
; GFX9-NEXT: s_and_b32 s0, s0, s2
; GFX9-NEXT: s_and_b32 s1, s1, s2
; GFX9-NEXT: s_lshr_b32 s0, s0, s1
; GFX9-NEXT: ; return to shader part epilog
; GCN-LABEL: s_lshr_i8:
; GCN: ; %bb.0:
; GCN-NEXT: s_and_b32 s0, s0, 0xff
; GCN-NEXT: s_lshr_b32 s0, s0, s1
; GCN-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_lshr_i8:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_movk_i32 s2, 0xff
; GFX10-NEXT: s_and_b32 s0, s0, s2
; GFX10-NEXT: s_and_b32 s1, s1, s2
; GFX10-NEXT: s_and_b32 s0, s0, 0xff
; GFX10-NEXT: s_lshr_b32 s0, s0, s1
; GFX10-NEXT: ; return to shader part epilog
  %result = lshr i8 %value, %amount
@@ -164,17 +144,13 @@ define i24 @v_lshr_i24_7(i24 %value) {
define amdgpu_ps i24 @s_lshr_i24(i24 inreg %value, i24 inreg %amount) {
; GCN-LABEL: s_lshr_i24:
; GCN: ; %bb.0:
; GCN-NEXT: s_mov_b32 s2, 0xffffff
; GCN-NEXT: s_and_b32 s1, s1, s2
; GCN-NEXT: s_and_b32 s0, s0, s2
; GCN-NEXT: s_and_b32 s0, s0, 0xffffff
; GCN-NEXT: s_lshr_b32 s0, s0, s1
; GCN-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_lshr_i24:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_mov_b32 s2, 0xffffff
; GFX10-NEXT: s_and_b32 s1, s1, s2
; GFX10-NEXT: s_and_b32 s0, s0, s2
; GFX10-NEXT: s_and_b32 s0, s0, 0xffffff
; GFX10-NEXT: s_lshr_b32 s0, s0, s1
; GFX10-NEXT: ; return to shader part epilog
  %result = lshr i24 %value, %amount
@@ -619,35 +595,15 @@ define i16 @v_lshr_i16_31(i16 %value) {
}

define amdgpu_ps i16 @s_lshr_i16(i16 inreg %value, i16 inreg %amount) {
; GFX6-LABEL: s_lshr_i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_mov_b32 s2, 0xffff
; GFX6-NEXT: s_and_b32 s1, s1, s2
; GFX6-NEXT: s_and_b32 s0, s0, s2
; GFX6-NEXT: s_lshr_b32 s0, s0, s1
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: s_lshr_i16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_mov_b32 s2, 0xffff
; GFX8-NEXT: s_and_b32 s0, s0, s2
; GFX8-NEXT: s_and_b32 s1, s1, s2
; GFX8-NEXT: s_lshr_b32 s0, s0, s1
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_lshr_i16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s2, 0xffff
; GFX9-NEXT: s_and_b32 s0, s0, s2
; GFX9-NEXT: s_and_b32 s1, s1, s2
; GFX9-NEXT: s_lshr_b32 s0, s0, s1
; GFX9-NEXT: ; return to shader part epilog
; GCN-LABEL: s_lshr_i16:
; GCN: ; %bb.0:
; GCN-NEXT: s_and_b32 s0, s0, 0xffff
; GCN-NEXT: s_lshr_b32 s0, s0, s1
; GCN-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_lshr_i16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_mov_b32 s2, 0xffff
; GFX10-NEXT: s_and_b32 s0, s0, s2
; GFX10-NEXT: s_and_b32 s1, s1, s2
; GFX10-NEXT: s_and_b32 s0, s0, 0xffff
; GFX10-NEXT: s_lshr_b32 s0, s0, s1
; GFX10-NEXT: ; return to shader part epilog
  %result = lshr i16 %value, %amount
@@ -798,12 +754,10 @@ define amdgpu_ps i32 @s_lshr_v2i16(<2 x i16> inreg %value, <2 x i16> inreg %amou
; GFX6-LABEL: s_lshr_v2i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_mov_b32 s4, 0xffff
; GFX6-NEXT: s_and_b32 s2, s2, s4
; GFX6-NEXT: s_and_b32 s0, s0, s4
; GFX6-NEXT: s_lshr_b32 s0, s0, s2
; GFX6-NEXT: s_and_b32 s2, s3, s4
; GFX6-NEXT: s_and_b32 s1, s1, s4
; GFX6-NEXT: s_lshr_b32 s1, s1, s2
; GFX6-NEXT: s_and_b32 s0, s0, s4
; GFX6-NEXT: s_lshr_b32 s1, s1, s3
; GFX6-NEXT: s_lshr_b32 s0, s0, s2
; GFX6-NEXT: s_lshl_b32 s1, s1, 16
; GFX6-NEXT: s_or_b32 s0, s0, s1
; GFX6-NEXT: ; return to shader part epilog
@@ -814,7 +768,6 @@ define amdgpu_ps i32 @s_lshr_v2i16(<2 x i16> inreg %value, <2 x i16> inreg %amou
; GFX8-NEXT: s_lshr_b32 s2, s0, 16
; GFX8-NEXT: s_and_b32 s0, s0, s3
; GFX8-NEXT: s_lshr_b32 s4, s1, 16
; GFX8-NEXT: s_and_b32 s1, s1, s3
; GFX8-NEXT: s_lshr_b32 s0, s0, s1
; GFX8-NEXT: s_lshr_b32 s1, s2, s4
; GFX8-NEXT: s_lshl_b32 s1, s1, 16
@@ -824,25 +777,21 @@ define amdgpu_ps i32 @s_lshr_v2i16(<2 x i16> inreg %value, <2 x i16> inreg %amou
;
; GFX9-LABEL: s_lshr_v2i16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s3, 0xffff
; GFX9-NEXT: s_lshr_b32 s2, s0, 16
; GFX9-NEXT: s_and_b32 s0, s0, s3
; GFX9-NEXT: s_lshr_b32 s4, s1, 16
; GFX9-NEXT: s_and_b32 s1, s1, s3
; GFX9-NEXT: s_and_b32 s0, s0, 0xffff
; GFX9-NEXT: s_lshr_b32 s3, s1, 16
; GFX9-NEXT: s_lshr_b32 s0, s0, s1
; GFX9-NEXT: s_lshr_b32 s1, s2, s4
; GFX9-NEXT: s_lshr_b32 s1, s2, s3
; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_lshr_v2i16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_mov_b32 s2, 0xffff
; GFX10-NEXT: s_lshr_b32 s3, s0, 16
; GFX10-NEXT: s_and_b32 s0, s0, s2
; GFX10-NEXT: s_and_b32 s2, s1, s2
; GFX10-NEXT: s_lshr_b32 s1, s1, 16
; GFX10-NEXT: s_lshr_b32 s0, s0, s2
; GFX10-NEXT: s_lshr_b32 s1, s3, s1
; GFX10-NEXT: s_lshr_b32 s2, s0, 16
; GFX10-NEXT: s_and_b32 s0, s0, 0xffff
; GFX10-NEXT: s_lshr_b32 s3, s1, 16
; GFX10-NEXT: s_lshr_b32 s0, s0, s1
; GFX10-NEXT: s_lshr_b32 s1, s2, s3
; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX10-NEXT: ; return to shader part epilog
  %result = lshr <2 x i16> %value, %amount
@@ -992,19 +941,15 @@ define amdgpu_ps <2 x i32> @s_lshr_v4i16(<4 x i16> inreg %value, <4 x i16> inreg
; GFX6-LABEL: s_lshr_v4i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_mov_b32 s8, 0xffff
; GFX6-NEXT: s_and_b32 s4, s4, s8
; GFX6-NEXT: s_and_b32 s0, s0, s8
; GFX6-NEXT: s_lshr_b32 s0, s0, s4
; GFX6-NEXT: s_and_b32 s4, s5, s8
; GFX6-NEXT: s_and_b32 s1, s1, s8
; GFX6-NEXT: s_lshr_b32 s1, s1, s4
; GFX6-NEXT: s_and_b32 s4, s6, s8
; GFX6-NEXT: s_and_b32 s2, s2, s8
; GFX6-NEXT: s_lshr_b32 s2, s2, s4
; GFX6-NEXT: s_and_b32 s4, s7, s8
; GFX6-NEXT: s_and_b32 s0, s0, s8
; GFX6-NEXT: s_lshr_b32 s1, s1, s5
; GFX6-NEXT: s_and_b32 s3, s3, s8
; GFX6-NEXT: s_lshr_b32 s3, s3, s4
; GFX6-NEXT: s_lshr_b32 s0, s0, s4
; GFX6-NEXT: s_and_b32 s2, s2, s8
; GFX6-NEXT: s_lshr_b32 s3, s3, s7
; GFX6-NEXT: s_lshl_b32 s1, s1, 16
; GFX6-NEXT: s_lshr_b32 s2, s2, s6
; GFX6-NEXT: s_or_b32 s0, s0, s1
; GFX6-NEXT: s_lshl_b32 s1, s3, 16
; GFX6-NEXT: s_or_b32 s1, s2, s1
@@ -1016,11 +961,9 @@ define amdgpu_ps <2 x i32> @s_lshr_v4i16(<4 x i16> inreg %value, <4 x i16> inreg
; GFX8-NEXT: s_lshr_b32 s4, s0, 16
; GFX8-NEXT: s_and_b32 s0, s0, s6
; GFX8-NEXT: s_lshr_b32 s7, s2, 16
; GFX8-NEXT: s_and_b32 s2, s2, s6
; GFX8-NEXT: s_lshr_b32 s5, s1, 16
; GFX8-NEXT: s_and_b32 s1, s1, s6
; GFX8-NEXT: s_lshr_b32 s8, s3, 16
; GFX8-NEXT: s_and_b32 s3, s3, s6
; GFX8-NEXT: s_lshr_b32 s0, s0, s2
; GFX8-NEXT: s_lshr_b32 s2, s4, s7
; GFX8-NEXT: s_lshr_b32 s1, s1, s3
@@ -1039,14 +982,12 @@ define amdgpu_ps <2 x i32> @s_lshr_v4i16(<4 x i16> inreg %value, <4 x i16> inreg
; GFX9-NEXT: s_lshr_b32 s4, s0, 16
; GFX9-NEXT: s_and_b32 s0, s0, s5
; GFX9-NEXT: s_lshr_b32 s6, s2, 16
; GFX9-NEXT: s_and_b32 s2, s2, s5
; GFX9-NEXT: s_lshr_b32 s0, s0, s2
; GFX9-NEXT: s_lshr_b32 s2, s4, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s2
; GFX9-NEXT: s_lshr_b32 s2, s1, 16
; GFX9-NEXT: s_and_b32 s1, s1, s5
; GFX9-NEXT: s_lshr_b32 s4, s3, 16
; GFX9-NEXT: s_and_b32 s3, s3, s5
; GFX9-NEXT: s_lshr_b32 s1, s1, s3
; GFX9-NEXT: s_lshr_b32 s2, s2, s4
; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s2
@@ -1056,17 +997,15 @@ define amdgpu_ps <2 x i32> @s_lshr_v4i16(<4 x i16> inreg %value, <4 x i16> inreg
; GFX10: ; %bb.0:
; GFX10-NEXT: s_mov_b32 s4, 0xffff
; GFX10-NEXT: s_lshr_b32 s5, s0, 16
; GFX10-NEXT: s_and_b32 s6, s2, s4
; GFX10-NEXT: s_lshr_b32 s2, s2, 16
; GFX10-NEXT: s_and_b32 s0, s0, s4
; GFX10-NEXT: s_lshr_b32 s2, s5, s2
; GFX10-NEXT: s_lshr_b32 s6, s2, 16
; GFX10-NEXT: s_lshr_b32 s0, s0, s2
; GFX10-NEXT: s_lshr_b32 s2, s5, s6
; GFX10-NEXT: s_lshr_b32 s5, s1, 16
; GFX10-NEXT: s_and_b32 s1, s1, s4
; GFX10-NEXT: s_and_b32 s4, s3, s4
; GFX10-NEXT: s_lshr_b32 s3, s3, 16
; GFX10-NEXT: s_lshr_b32 s0, s0, s6
; GFX10-NEXT: s_lshr_b32 s1, s1, s4
; GFX10-NEXT: s_lshr_b32 s3, s5, s3
; GFX10-NEXT: s_lshr_b32 s4, s3, 16
; GFX10-NEXT: s_lshr_b32 s1, s1, s3
; GFX10-NEXT: s_lshr_b32 s3, s5, s4
; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s2
; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s3
; GFX10-NEXT: ; return to shader part epilog
@@ -1182,33 +1121,25 @@ define amdgpu_ps <4 x i32> @s_lshr_v8i16(<8 x i16> inreg %value, <8 x i16> inreg
; GFX6-LABEL: s_lshr_v8i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_mov_b32 s16, 0xffff
; GFX6-NEXT: s_and_b32 s8, s8, s16
; GFX6-NEXT: s_and_b32 s0, s0, s16
; GFX6-NEXT: s_lshr_b32 s0, s0, s8
; GFX6-NEXT: s_and_b32 s8, s9, s16
; GFX6-NEXT: s_and_b32 s1, s1, s16
; GFX6-NEXT: s_lshr_b32 s1, s1, s8
; GFX6-NEXT: s_and_b32 s8, s10, s16
; GFX6-NEXT: s_and_b32 s2, s2, s16
; GFX6-NEXT: s_lshr_b32 s2, s2, s8
; GFX6-NEXT: s_and_b32 s8, s11, s16
; GFX6-NEXT: s_and_b32 s0, s0, s16
; GFX6-NEXT: s_lshr_b32 s1, s1, s9
|
||||
; GFX6-NEXT: s_and_b32 s3, s3, s16
|
||||
; GFX6-NEXT: s_lshr_b32 s3, s3, s8
|
||||
; GFX6-NEXT: s_and_b32 s8, s12, s16
|
||||
; GFX6-NEXT: s_and_b32 s4, s4, s16
|
||||
; GFX6-NEXT: s_lshr_b32 s4, s4, s8
|
||||
; GFX6-NEXT: s_and_b32 s8, s13, s16
|
||||
; GFX6-NEXT: s_lshr_b32 s0, s0, s8
|
||||
; GFX6-NEXT: s_and_b32 s2, s2, s16
|
||||
; GFX6-NEXT: s_lshr_b32 s3, s3, s11
|
||||
; GFX6-NEXT: s_and_b32 s5, s5, s16
|
||||
; GFX6-NEXT: s_lshr_b32 s5, s5, s8
|
||||
; GFX6-NEXT: s_and_b32 s8, s14, s16
|
||||
; GFX6-NEXT: s_and_b32 s6, s6, s16
|
||||
; GFX6-NEXT: s_lshr_b32 s6, s6, s8
|
||||
; GFX6-NEXT: s_and_b32 s8, s15, s16
|
||||
; GFX6-NEXT: s_and_b32 s7, s7, s16
|
||||
; GFX6-NEXT: s_lshl_b32 s1, s1, 16
|
||||
; GFX6-NEXT: s_lshr_b32 s7, s7, s8
|
||||
; GFX6-NEXT: s_lshr_b32 s2, s2, s10
|
||||
; GFX6-NEXT: s_and_b32 s4, s4, s16
|
||||
; GFX6-NEXT: s_lshr_b32 s5, s5, s13
|
||||
; GFX6-NEXT: s_and_b32 s6, s6, s16
|
||||
; GFX6-NEXT: s_lshr_b32 s7, s7, s15
|
||||
; GFX6-NEXT: s_or_b32 s0, s0, s1
|
||||
; GFX6-NEXT: s_lshl_b32 s1, s3, 16
|
||||
; GFX6-NEXT: s_lshr_b32 s4, s4, s12
|
||||
; GFX6-NEXT: s_lshr_b32 s6, s6, s14
|
||||
; GFX6-NEXT: s_or_b32 s1, s2, s1
|
||||
; GFX6-NEXT: s_lshl_b32 s2, s5, 16
|
||||
; GFX6-NEXT: s_lshl_b32 s3, s7, 16
|
||||
|
@ -1222,17 +1153,14 @@ define amdgpu_ps <4 x i32> @s_lshr_v8i16(<8 x i16> inreg %value, <8 x i16> inreg
|
|||
; GFX8-NEXT: s_lshr_b32 s8, s0, 16
|
||||
; GFX8-NEXT: s_and_b32 s0, s0, s12
|
||||
; GFX8-NEXT: s_lshr_b32 s13, s4, 16
|
||||
; GFX8-NEXT: s_and_b32 s4, s4, s12
|
||||
; GFX8-NEXT: s_lshr_b32 s9, s1, 16
|
||||
; GFX8-NEXT: s_and_b32 s1, s1, s12
|
||||
; GFX8-NEXT: s_lshr_b32 s14, s5, 16
|
||||
; GFX8-NEXT: s_and_b32 s5, s5, s12
|
||||
; GFX8-NEXT: s_lshr_b32 s0, s0, s4
|
||||
; GFX8-NEXT: s_lshr_b32 s4, s8, s13
|
||||
; GFX8-NEXT: s_lshr_b32 s10, s2, 16
|
||||
; GFX8-NEXT: s_and_b32 s2, s2, s12
|
||||
; GFX8-NEXT: s_lshr_b32 s15, s6, 16
|
||||
; GFX8-NEXT: s_and_b32 s6, s6, s12
|
||||
; GFX8-NEXT: s_lshr_b32 s1, s1, s5
|
||||
; GFX8-NEXT: s_lshr_b32 s5, s9, s14
|
||||
; GFX8-NEXT: s_lshl_b32 s4, s4, 16
|
||||
|
@ -1240,7 +1168,6 @@ define amdgpu_ps <4 x i32> @s_lshr_v8i16(<8 x i16> inreg %value, <8 x i16> inreg
|
|||
; GFX8-NEXT: s_lshr_b32 s11, s3, 16
|
||||
; GFX8-NEXT: s_and_b32 s3, s3, s12
|
||||
; GFX8-NEXT: s_lshr_b32 s16, s7, 16
|
||||
; GFX8-NEXT: s_and_b32 s7, s7, s12
|
||||
; GFX8-NEXT: s_lshr_b32 s2, s2, s6
|
||||
; GFX8-NEXT: s_lshr_b32 s6, s10, s15
|
||||
; GFX8-NEXT: s_or_b32 s0, s4, s0
|
||||
|
@ -1263,29 +1190,25 @@ define amdgpu_ps <4 x i32> @s_lshr_v8i16(<8 x i16> inreg %value, <8 x i16> inreg
|
|||
; GFX9-NEXT: s_lshr_b32 s8, s0, 16
|
||||
; GFX9-NEXT: s_and_b32 s0, s0, s9
|
||||
; GFX9-NEXT: s_lshr_b32 s10, s4, 16
|
||||
; GFX9-NEXT: s_and_b32 s4, s4, s9
|
||||
; GFX9-NEXT: s_lshr_b32 s0, s0, s4
|
||||
; GFX9-NEXT: s_lshr_b32 s4, s8, s10
|
||||
; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s4
|
||||
; GFX9-NEXT: s_lshr_b32 s4, s1, 16
|
||||
; GFX9-NEXT: s_and_b32 s1, s1, s9
|
||||
; GFX9-NEXT: s_lshr_b32 s8, s5, 16
|
||||
; GFX9-NEXT: s_and_b32 s5, s5, s9
|
||||
; GFX9-NEXT: s_lshr_b32 s1, s1, s5
|
||||
; GFX9-NEXT: s_lshr_b32 s4, s4, s8
|
||||
; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s4
|
||||
; GFX9-NEXT: s_lshr_b32 s4, s2, 16
|
||||
; GFX9-NEXT: s_and_b32 s2, s2, s9
|
||||
; GFX9-NEXT: s_lshr_b32 s5, s6, 16
|
||||
; GFX9-NEXT: s_and_b32 s6, s6, s9
|
||||
; GFX9-NEXT: s_lshr_b32 s2, s2, s6
|
||||
; GFX9-NEXT: s_lshr_b32 s4, s4, s5
|
||||
; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s4
|
||||
; GFX9-NEXT: s_lshr_b32 s4, s3, 16
|
||||
; GFX9-NEXT: s_and_b32 s3, s3, s9
|
||||
; GFX9-NEXT: s_lshr_b32 s5, s7, 16
|
||||
; GFX9-NEXT: s_and_b32 s6, s7, s9
|
||||
; GFX9-NEXT: s_lshr_b32 s3, s3, s6
|
||||
; GFX9-NEXT: s_lshr_b32 s3, s3, s7
|
||||
; GFX9-NEXT: s_lshr_b32 s4, s4, s5
|
||||
; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s4
|
||||
; GFX9-NEXT: ; return to shader part epilog
|
||||
|
@ -1295,30 +1218,26 @@ define amdgpu_ps <4 x i32> @s_lshr_v8i16(<8 x i16> inreg %value, <8 x i16> inreg
|
|||
; GFX10-NEXT: s_mov_b32 s8, 0xffff
|
||||
; GFX10-NEXT: s_lshr_b32 s9, s0, 16
|
||||
; GFX10-NEXT: s_and_b32 s0, s0, s8
|
||||
; GFX10-NEXT: s_and_b32 s10, s4, s8
|
||||
; GFX10-NEXT: s_lshr_b32 s4, s4, 16
|
||||
; GFX10-NEXT: s_lshr_b32 s0, s0, s10
|
||||
; GFX10-NEXT: s_lshr_b32 s4, s9, s4
|
||||
; GFX10-NEXT: s_lshr_b32 s10, s4, 16
|
||||
; GFX10-NEXT: s_lshr_b32 s0, s0, s4
|
||||
; GFX10-NEXT: s_lshr_b32 s4, s9, s10
|
||||
; GFX10-NEXT: s_lshr_b32 s9, s1, 16
|
||||
; GFX10-NEXT: s_and_b32 s1, s1, s8
|
||||
; GFX10-NEXT: s_and_b32 s10, s5, s8
|
||||
; GFX10-NEXT: s_lshr_b32 s5, s5, 16
|
||||
; GFX10-NEXT: s_lshr_b32 s1, s1, s10
|
||||
; GFX10-NEXT: s_lshr_b32 s5, s9, s5
|
||||
; GFX10-NEXT: s_lshr_b32 s10, s5, 16
|
||||
; GFX10-NEXT: s_lshr_b32 s1, s1, s5
|
||||
; GFX10-NEXT: s_lshr_b32 s5, s9, s10
|
||||
; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s4
|
||||
; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s5
|
||||
; GFX10-NEXT: s_lshr_b32 s4, s2, 16
|
||||
; GFX10-NEXT: s_and_b32 s2, s2, s8
|
||||
; GFX10-NEXT: s_and_b32 s5, s6, s8
|
||||
; GFX10-NEXT: s_lshr_b32 s6, s6, 16
|
||||
; GFX10-NEXT: s_lshr_b32 s2, s2, s5
|
||||
; GFX10-NEXT: s_lshr_b32 s4, s4, s6
|
||||
; GFX10-NEXT: s_lshr_b32 s5, s6, 16
|
||||
; GFX10-NEXT: s_lshr_b32 s2, s2, s6
|
||||
; GFX10-NEXT: s_lshr_b32 s4, s4, s5
|
||||
; GFX10-NEXT: s_lshr_b32 s5, s3, 16
|
||||
; GFX10-NEXT: s_and_b32 s3, s3, s8
|
||||
; GFX10-NEXT: s_and_b32 s6, s7, s8
|
||||
; GFX10-NEXT: s_lshr_b32 s7, s7, 16
|
||||
; GFX10-NEXT: s_lshr_b32 s3, s3, s6
|
||||
; GFX10-NEXT: s_lshr_b32 s5, s5, s7
|
||||
; GFX10-NEXT: s_lshr_b32 s6, s7, 16
|
||||
; GFX10-NEXT: s_lshr_b32 s3, s3, s7
|
||||
; GFX10-NEXT: s_lshr_b32 s5, s5, s6
|
||||
; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s4
|
||||
; GFX10-NEXT: s_pack_ll_b32_b16 s3, s3, s5
|
||||
; GFX10-NEXT: ; return to shader part epilog
|
||||
|
|
|
@ -67,31 +67,24 @@ define i8 @v_shl_i8_7(i8 %value) {
define amdgpu_ps i8 @s_shl_i8(i8 inreg %value, i8 inreg %amount) {
; GFX6-LABEL: s_shl_i8:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_and_b32 s1, s1, 0xff
; GFX6-NEXT: s_lshl_b32 s0, s0, s1
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: s_shl_i8:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_movk_i32 s2, 0xff
; GFX8-NEXT: s_and_b32 s0, s0, s2
; GFX8-NEXT: s_and_b32 s1, s1, s2
; GFX8-NEXT: s_and_b32 s0, s0, 0xff
; GFX8-NEXT: s_lshl_b32 s0, s0, s1
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_shl_i8:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_movk_i32 s2, 0xff
; GFX9-NEXT: s_and_b32 s0, s0, s2
; GFX9-NEXT: s_and_b32 s1, s1, s2
; GFX9-NEXT: s_and_b32 s0, s0, 0xff
; GFX9-NEXT: s_lshl_b32 s0, s0, s1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_shl_i8:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_movk_i32 s2, 0xff
; GFX10-NEXT: s_and_b32 s0, s0, s2
; GFX10-NEXT: s_and_b32 s1, s1, s2
; GFX10-NEXT: s_and_b32 s0, s0, 0xff
; GFX10-NEXT: s_lshl_b32 s0, s0, s1
; GFX10-NEXT: ; return to shader part epilog
%result = shl i8 %value, %amount
@ -165,13 +158,11 @@ define i24 @v_shl_i24_7(i24 %value) {
define amdgpu_ps i24 @s_shl_i24(i24 inreg %value, i24 inreg %amount) {
; GCN-LABEL: s_shl_i24:
; GCN: ; %bb.0:
; GCN-NEXT: s_and_b32 s1, s1, 0xffffff
; GCN-NEXT: s_lshl_b32 s0, s0, s1
; GCN-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_shl_i24:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_and_b32 s1, s1, 0xffffff
; GFX10-NEXT: s_lshl_b32 s0, s0, s1
; GFX10-NEXT: ; return to shader part epilog
%result = shl i24 %value, %amount
@ -616,31 +607,24 @@ define i16 @v_shl_i16_31(i16 %value) {
define amdgpu_ps i16 @s_shl_i16(i16 inreg %value, i16 inreg %amount) {
; GFX6-LABEL: s_shl_i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_and_b32 s1, s1, 0xffff
; GFX6-NEXT: s_lshl_b32 s0, s0, s1
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: s_shl_i16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_mov_b32 s2, 0xffff
; GFX8-NEXT: s_and_b32 s0, s0, s2
; GFX8-NEXT: s_and_b32 s1, s1, s2
; GFX8-NEXT: s_and_b32 s0, s0, 0xffff
; GFX8-NEXT: s_lshl_b32 s0, s0, s1
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_shl_i16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s2, 0xffff
; GFX9-NEXT: s_and_b32 s0, s0, s2
; GFX9-NEXT: s_and_b32 s1, s1, s2
; GFX9-NEXT: s_and_b32 s0, s0, 0xffff
; GFX9-NEXT: s_lshl_b32 s0, s0, s1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_shl_i16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_mov_b32 s2, 0xffff
; GFX10-NEXT: s_and_b32 s0, s0, s2
; GFX10-NEXT: s_and_b32 s1, s1, s2
; GFX10-NEXT: s_and_b32 s0, s0, 0xffff
; GFX10-NEXT: s_lshl_b32 s0, s0, s1
; GFX10-NEXT: ; return to shader part epilog
%result = shl i16 %value, %amount
@ -798,10 +782,8 @@ define amdgpu_ps i32 @s_shl_v2i16(<2 x i16> inreg %value, <2 x i16> inreg %amoun
; GFX6-LABEL: s_shl_v2i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_mov_b32 s4, 0xffff
; GFX6-NEXT: s_and_b32 s2, s2, s4
; GFX6-NEXT: s_lshl_b32 s1, s1, s3
; GFX6-NEXT: s_lshl_b32 s0, s0, s2
; GFX6-NEXT: s_and_b32 s2, s3, s4
; GFX6-NEXT: s_lshl_b32 s1, s1, s2
; GFX6-NEXT: s_and_b32 s1, s1, s4
; GFX6-NEXT: s_and_b32 s0, s0, s4
; GFX6-NEXT: s_lshl_b32 s1, s1, 16
@ -814,7 +796,6 @@ define amdgpu_ps i32 @s_shl_v2i16(<2 x i16> inreg %value, <2 x i16> inreg %amoun
; GFX8-NEXT: s_lshr_b32 s2, s0, 16
; GFX8-NEXT: s_and_b32 s0, s0, s3
; GFX8-NEXT: s_lshr_b32 s4, s1, 16
; GFX8-NEXT: s_and_b32 s1, s1, s3
; GFX8-NEXT: s_lshl_b32 s0, s0, s1
; GFX8-NEXT: s_lshl_b32 s1, s2, s4
; GFX8-NEXT: s_lshl_b32 s1, s1, 16
@ -986,15 +967,11 @@ define amdgpu_ps <2 x i32> @s_shl_v4i16(<4 x i16> inreg %value, <4 x i16> inreg
; GFX6-LABEL: s_shl_v4i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_mov_b32 s8, 0xffff
; GFX6-NEXT: s_and_b32 s4, s4, s8
; GFX6-NEXT: s_lshl_b32 s1, s1, s5
; GFX6-NEXT: s_lshl_b32 s0, s0, s4
; GFX6-NEXT: s_and_b32 s4, s5, s8
; GFX6-NEXT: s_lshl_b32 s1, s1, s4
; GFX6-NEXT: s_and_b32 s4, s6, s8
; GFX6-NEXT: s_lshl_b32 s2, s2, s4
; GFX6-NEXT: s_and_b32 s4, s7, s8
; GFX6-NEXT: s_and_b32 s1, s1, s8
; GFX6-NEXT: s_lshl_b32 s3, s3, s4
; GFX6-NEXT: s_lshl_b32 s2, s2, s6
; GFX6-NEXT: s_lshl_b32 s3, s3, s7
; GFX6-NEXT: s_and_b32 s0, s0, s8
; GFX6-NEXT: s_lshl_b32 s1, s1, 16
; GFX6-NEXT: s_or_b32 s0, s0, s1
@ -1010,11 +987,9 @@ define amdgpu_ps <2 x i32> @s_shl_v4i16(<4 x i16> inreg %value, <4 x i16> inreg
; GFX8-NEXT: s_lshr_b32 s4, s0, 16
; GFX8-NEXT: s_and_b32 s0, s0, s6
; GFX8-NEXT: s_lshr_b32 s7, s2, 16
; GFX8-NEXT: s_and_b32 s2, s2, s6
; GFX8-NEXT: s_lshr_b32 s5, s1, 16
; GFX8-NEXT: s_and_b32 s1, s1, s6
; GFX8-NEXT: s_lshr_b32 s8, s3, 16
; GFX8-NEXT: s_and_b32 s3, s3, s6
; GFX8-NEXT: s_lshl_b32 s0, s0, s2
; GFX8-NEXT: s_lshl_b32 s2, s4, s7
; GFX8-NEXT: s_lshl_b32 s1, s1, s3
@ -1166,30 +1141,22 @@ define amdgpu_ps <4 x i32> @s_shl_v8i16(<8 x i16> inreg %value, <8 x i16> inreg
; GFX6-LABEL: s_shl_v8i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_mov_b32 s16, 0xffff
; GFX6-NEXT: s_and_b32 s8, s8, s16
; GFX6-NEXT: s_lshl_b32 s1, s1, s9
; GFX6-NEXT: s_lshl_b32 s0, s0, s8
; GFX6-NEXT: s_and_b32 s8, s9, s16
; GFX6-NEXT: s_lshl_b32 s1, s1, s8
; GFX6-NEXT: s_and_b32 s8, s10, s16
; GFX6-NEXT: s_lshl_b32 s2, s2, s8
; GFX6-NEXT: s_and_b32 s8, s11, s16
; GFX6-NEXT: s_lshl_b32 s3, s3, s8
; GFX6-NEXT: s_and_b32 s8, s12, s16
; GFX6-NEXT: s_lshl_b32 s4, s4, s8
; GFX6-NEXT: s_and_b32 s8, s13, s16
; GFX6-NEXT: s_and_b32 s1, s1, s16
; GFX6-NEXT: s_lshl_b32 s5, s5, s8
; GFX6-NEXT: s_and_b32 s8, s14, s16
; GFX6-NEXT: s_lshl_b32 s2, s2, s10
; GFX6-NEXT: s_lshl_b32 s3, s3, s11
; GFX6-NEXT: s_and_b32 s0, s0, s16
; GFX6-NEXT: s_lshl_b32 s1, s1, 16
; GFX6-NEXT: s_lshl_b32 s6, s6, s8
; GFX6-NEXT: s_and_b32 s8, s15, s16
; GFX6-NEXT: s_lshl_b32 s5, s5, s13
; GFX6-NEXT: s_or_b32 s0, s0, s1
; GFX6-NEXT: s_and_b32 s1, s2, s16
; GFX6-NEXT: s_and_b32 s2, s3, s16
; GFX6-NEXT: s_lshl_b32 s7, s7, s8
; GFX6-NEXT: s_lshl_b32 s4, s4, s12
; GFX6-NEXT: s_lshl_b32 s7, s7, s15
; GFX6-NEXT: s_lshl_b32 s2, s2, 16
; GFX6-NEXT: s_and_b32 s3, s5, s16
; GFX6-NEXT: s_lshl_b32 s6, s6, s14
; GFX6-NEXT: s_or_b32 s1, s1, s2
; GFX6-NEXT: s_and_b32 s2, s4, s16
; GFX6-NEXT: s_lshl_b32 s3, s3, 16
@ -1206,17 +1173,14 @@ define amdgpu_ps <4 x i32> @s_shl_v8i16(<8 x i16> inreg %value, <8 x i16> inreg
; GFX8-NEXT: s_lshr_b32 s8, s0, 16
; GFX8-NEXT: s_and_b32 s0, s0, s12
; GFX8-NEXT: s_lshr_b32 s13, s4, 16
; GFX8-NEXT: s_and_b32 s4, s4, s12
; GFX8-NEXT: s_lshr_b32 s9, s1, 16
; GFX8-NEXT: s_and_b32 s1, s1, s12
; GFX8-NEXT: s_lshr_b32 s14, s5, 16
; GFX8-NEXT: s_and_b32 s5, s5, s12
; GFX8-NEXT: s_lshl_b32 s0, s0, s4
; GFX8-NEXT: s_lshl_b32 s4, s8, s13
; GFX8-NEXT: s_lshr_b32 s10, s2, 16
; GFX8-NEXT: s_and_b32 s2, s2, s12
; GFX8-NEXT: s_lshr_b32 s15, s6, 16
; GFX8-NEXT: s_and_b32 s6, s6, s12
; GFX8-NEXT: s_lshl_b32 s1, s1, s5
; GFX8-NEXT: s_lshl_b32 s5, s9, s14
; GFX8-NEXT: s_lshl_b32 s4, s4, 16
@ -1224,7 +1188,6 @@ define amdgpu_ps <4 x i32> @s_shl_v8i16(<8 x i16> inreg %value, <8 x i16> inreg
; GFX8-NEXT: s_lshr_b32 s11, s3, 16
; GFX8-NEXT: s_and_b32 s3, s3, s12
; GFX8-NEXT: s_lshr_b32 s16, s7, 16
; GFX8-NEXT: s_and_b32 s7, s7, s12
; GFX8-NEXT: s_lshl_b32 s2, s2, s6
; GFX8-NEXT: s_lshl_b32 s6, s10, s15
; GFX8-NEXT: s_or_b32 s0, s4, s0

@ -37,15 +37,13 @@ define amdgpu_kernel void @s_ashr_v2i16(<2 x i16> addrspace(1)* %out, i32, <2 x
; VI: v_ashrrev_i16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}

; CI: s_mov_b32 [[MASK:s[0-9]+]], 0xffff{{$}}
; CI-DAG: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 16
; CI: v_ashrrev_i32_e32 v{{[0-9]+}}, 16, [[LHS]]
; CI-DAG: v_and_b32_e32 v{{[0-9]+}}, [[MASK]], [[RHS]]
; CI: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; CI: v_ashr_i32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; CI: v_ashr_i32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; CI: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; CI: v_and_b32_e32 v{{[0-9]+}}, [[MASK]], v{{[0-9]+}}
; CI: v_and_b32_e32 v{{[0-9]+}}, 0xffff, v{{[0-9]+}}
; CI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_ashr_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()

@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn -mcpu=gfx900 < %s | FileCheck %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -global-isel < %s | FileCheck -check-prefix=GISEL %s

define i16 @csh_16(i16 %a, i16 %b) {
; CHECK-LABEL: csh_16:
@ -11,6 +12,17 @@ define i16 @csh_16(i16 %a, i16 %b) {
; CHECK-NEXT: v_add_u16_e32 v1, v2, v3
; CHECK-NEXT: v_add_u16_e32 v0, v1, v0
; CHECK-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: csh_16:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_and_b32_e32 v1, 15, v1
; GISEL-NEXT: v_lshlrev_b16_e32 v2, v1, v0
; GISEL-NEXT: v_lshrrev_b16_e32 v3, v1, v0
; GISEL-NEXT: v_ashrrev_i16_e32 v0, v1, v0
; GISEL-NEXT: v_add_u16_e32 v1, v2, v3
; GISEL-NEXT: v_add_u16_e32 v0, v1, v0
; GISEL-NEXT: s_setpc_b64 s[30:31]
%and = and i16 %b, 15
%shl = shl i16 %a, %and
%lshr = lshr i16 %a, %and
@ -29,6 +41,16 @@ define i32 @csh_32(i32 %a, i32 %b) {
; CHECK-NEXT: v_ashrrev_i32_e32 v0, v1, v0
; CHECK-NEXT: v_add3_u32 v0, v2, v3, v0
; CHECK-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: csh_32:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_and_b32_e32 v1, 31, v1
; GISEL-NEXT: v_lshlrev_b32_e32 v2, v1, v0
; GISEL-NEXT: v_lshrrev_b32_e32 v3, v1, v0
; GISEL-NEXT: v_ashrrev_i32_e32 v0, v1, v0
; GISEL-NEXT: v_add3_u32 v0, v2, v3, v0
; GISEL-NEXT: s_setpc_b64 s[30:31]
%and = and i32 %b, 31
%shl = shl i32 %a, %and
%lshr = lshr i32 %a, %and
@ -38,8 +60,8 @@ define i32 @csh_32(i32 %a, i32 %b) {
ret i32 %ret
}

define amdgpu_ps i32 @s_csh_32(i32 inreg %a, i32 inreg %b) {
; CHECK-LABEL: s_csh_32:
define amdgpu_ps i32 @s_csh_32_0(i32 inreg %a, i32 inreg %b) {
; CHECK-LABEL: s_csh_32_0:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_lshl_b32 s2, s0, s1
; CHECK-NEXT: s_lshr_b32 s3, s0, s1
@ -47,6 +69,15 @@ define amdgpu_ps i32 @s_csh_32(i32 inreg %a, i32 inreg %b) {
; CHECK-NEXT: s_add_i32 s1, s2, s3
; CHECK-NEXT: s_add_i32 s0, s1, s0
; CHECK-NEXT: ; return to shader part epilog
;
; GISEL-LABEL: s_csh_32_0:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_lshl_b32 s2, s0, s1
; GISEL-NEXT: s_lshr_b32 s3, s0, s1
; GISEL-NEXT: s_ashr_i32 s0, s0, s1
; GISEL-NEXT: s_add_i32 s1, s2, s3
; GISEL-NEXT: s_add_i32 s0, s1, s0
; GISEL-NEXT: ; return to shader part epilog
%and = and i32 %b, 31
%shl = shl i32 %a, %and
%lshr = lshr i32 %a, %and
@ -56,6 +87,33 @@ define amdgpu_ps i32 @s_csh_32(i32 inreg %a, i32 inreg %b) {
ret i32 %ret
}

define amdgpu_ps i32 @s_csh_32_1(i32 inreg %a, i32 inreg %b) {
; CHECK-LABEL: s_csh_32_1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_lshl_b32 s2, s0, s1
; CHECK-NEXT: s_lshr_b32 s3, s0, s1
; CHECK-NEXT: s_ashr_i32 s0, s0, s1
; CHECK-NEXT: s_add_i32 s1, s2, s3
; CHECK-NEXT: s_add_i32 s0, s1, s0
; CHECK-NEXT: ; return to shader part epilog
;
; GISEL-LABEL: s_csh_32_1:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_lshl_b32 s2, s0, s1
; GISEL-NEXT: s_lshr_b32 s3, s0, s1
; GISEL-NEXT: s_ashr_i32 s0, s0, s1
; GISEL-NEXT: s_add_i32 s1, s2, s3
; GISEL-NEXT: s_add_i32 s0, s1, s0
; GISEL-NEXT: ; return to shader part epilog
%and = and i32 %b, 127
%shl = shl i32 %a, %and
%lshr = lshr i32 %a, %and
%ashr = ashr i32 %a, %and
%ret.0 = add i32 %shl, %lshr
%ret = add i32 %ret.0, %ashr
ret i32 %ret
}

define <4 x i32> @csh_v4i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: csh_v4i32:
; CHECK: ; %bb.0:
@ -77,6 +135,31 @@ define <4 x i32> @csh_v4i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: v_add3_u32 v2, v9, v13, v2
; CHECK-NEXT: v_add3_u32 v3, v8, v12, v3
; CHECK-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: csh_v4i32:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_and_b32_e32 v4, 31, v4
; GISEL-NEXT: v_and_b32_e32 v5, 31, v5
; GISEL-NEXT: v_and_b32_e32 v6, 31, v6
; GISEL-NEXT: v_and_b32_e32 v7, 31, v7
; GISEL-NEXT: v_lshlrev_b32_e32 v8, v4, v0
; GISEL-NEXT: v_lshlrev_b32_e32 v9, v5, v1
; GISEL-NEXT: v_lshlrev_b32_e32 v10, v6, v2
; GISEL-NEXT: v_lshlrev_b32_e32 v11, v7, v3
; GISEL-NEXT: v_lshrrev_b32_e32 v12, v4, v0
; GISEL-NEXT: v_lshrrev_b32_e32 v13, v5, v1
; GISEL-NEXT: v_lshrrev_b32_e32 v14, v6, v2
; GISEL-NEXT: v_lshrrev_b32_e32 v15, v7, v3
; GISEL-NEXT: v_ashrrev_i32_e32 v0, v4, v0
; GISEL-NEXT: v_ashrrev_i32_e32 v1, v5, v1
; GISEL-NEXT: v_ashrrev_i32_e32 v2, v6, v2
; GISEL-NEXT: v_ashrrev_i32_e32 v3, v7, v3
; GISEL-NEXT: v_add3_u32 v0, v8, v12, v0
; GISEL-NEXT: v_add3_u32 v1, v9, v13, v1
; GISEL-NEXT: v_add3_u32 v2, v10, v14, v2
; GISEL-NEXT: v_add3_u32 v3, v11, v15, v3
; GISEL-NEXT: s_setpc_b64 s[30:31]
%and = and <4 x i32> %b, <i32 31, i32 31, i32 31, i32 31>
%shl = shl <4 x i32> %a, %and
%lshr = lshr <4 x i32> %a, %and
@ -110,6 +193,34 @@ define amdgpu_ps <4 x i32> @s_csh_v4i32(<4 x i32> inreg %a, <4 x i32> inreg %b)
; CHECK-NEXT: s_add_i32 s2, s5, s2
; CHECK-NEXT: s_add_i32 s3, s4, s3
; CHECK-NEXT: ; return to shader part epilog
;
; GISEL-LABEL: s_csh_v4i32:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_mov_b32 s8, 31
; GISEL-NEXT: s_mov_b32 s9, s8
; GISEL-NEXT: s_and_b64 s[4:5], s[4:5], s[8:9]
; GISEL-NEXT: s_and_b64 s[6:7], s[6:7], s[8:9]
; GISEL-NEXT: s_lshl_b32 s8, s0, s4
; GISEL-NEXT: s_lshl_b32 s9, s1, s5
; GISEL-NEXT: s_lshl_b32 s10, s2, s6
; GISEL-NEXT: s_lshl_b32 s11, s3, s7
; GISEL-NEXT: s_lshr_b32 s12, s0, s4
; GISEL-NEXT: s_lshr_b32 s13, s1, s5
; GISEL-NEXT: s_lshr_b32 s14, s2, s6
; GISEL-NEXT: s_lshr_b32 s15, s3, s7
; GISEL-NEXT: s_ashr_i32 s0, s0, s4
; GISEL-NEXT: s_ashr_i32 s1, s1, s5
; GISEL-NEXT: s_ashr_i32 s2, s2, s6
; GISEL-NEXT: s_ashr_i32 s3, s3, s7
; GISEL-NEXT: s_add_i32 s4, s8, s12
; GISEL-NEXT: s_add_i32 s5, s9, s13
; GISEL-NEXT: s_add_i32 s6, s10, s14
; GISEL-NEXT: s_add_i32 s7, s11, s15
; GISEL-NEXT: s_add_i32 s0, s4, s0
; GISEL-NEXT: s_add_i32 s1, s5, s1
; GISEL-NEXT: s_add_i32 s2, s6, s2
; GISEL-NEXT: s_add_i32 s3, s7, s3
; GISEL-NEXT: ; return to shader part epilog
%and = and <4 x i32> %b, <i32 31, i32 31, i32 31, i32 31>
%shl = shl <4 x i32> %a, %and
%lshr = lshr <4 x i32> %a, %and
@ -131,6 +242,19 @@ define i64 @csh_64(i64 %a, i64 %b) {
; CHECK-NEXT: v_add_co_u32_e32 v0, vcc, v2, v0
; CHECK-NEXT: v_addc_co_u32_e32 v1, vcc, v3, v1, vcc
; CHECK-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: csh_64:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_and_b32_e32 v6, 63, v2
; GISEL-NEXT: v_lshlrev_b64 v[2:3], v6, v[0:1]
; GISEL-NEXT: v_lshrrev_b64 v[4:5], v6, v[0:1]
; GISEL-NEXT: v_ashrrev_i64 v[0:1], v6, v[0:1]
; GISEL-NEXT: v_add_co_u32_e32 v2, vcc, v2, v4
; GISEL-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v5, vcc
; GISEL-NEXT: v_add_co_u32_e32 v0, vcc, v2, v0
; GISEL-NEXT: v_addc_co_u32_e32 v1, vcc, v3, v1, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
%and = and i64 %b, 63
%shl = shl i64 %a, %and
%lshr = lshr i64 %a, %and
@ -140,8 +264,8 @@ define i64 @csh_64(i64 %a, i64 %b) {
ret i64 %ret
}

define amdgpu_ps i64 @s_csh_64(i64 inreg %a, i64 inreg %b) {
; CHECK-LABEL: s_csh_64:
define amdgpu_ps i64 @s_csh_64_0(i64 inreg %a, i64 inreg %b) {
; CHECK-LABEL: s_csh_64_0:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_lshl_b64 s[4:5], s[0:1], s2
; CHECK-NEXT: s_lshr_b64 s[6:7], s[0:1], s2
@ -151,6 +275,24 @@ define amdgpu_ps i64 @s_csh_64(i64 inreg %a, i64 inreg %b) {
; CHECK-NEXT: s_add_u32 s0, s2, s0
; CHECK-NEXT: s_addc_u32 s1, s3, s1
; CHECK-NEXT: ; return to shader part epilog
;
; GISEL-LABEL: s_csh_64_0:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_and_b64 s[2:3], s[2:3], 63
; GISEL-NEXT: s_lshl_b64 s[4:5], s[0:1], s2
; GISEL-NEXT: s_lshr_b64 s[6:7], s[0:1], s2
; GISEL-NEXT: s_ashr_i64 s[0:1], s[0:1], s2
; GISEL-NEXT: s_add_u32 s2, s4, s6
; GISEL-NEXT: s_cselect_b32 s3, 1, 0
; GISEL-NEXT: s_and_b32 s3, s3, 1
; GISEL-NEXT: s_cmp_lg_u32 s3, 0
; GISEL-NEXT: s_addc_u32 s3, s5, s7
; GISEL-NEXT: s_add_u32 s0, s2, s0
; GISEL-NEXT: s_cselect_b32 s2, 1, 0
; GISEL-NEXT: s_and_b32 s2, s2, 1
; GISEL-NEXT: s_cmp_lg_u32 s2, 0
; GISEL-NEXT: s_addc_u32 s1, s3, s1
; GISEL-NEXT: ; return to shader part epilog
%and = and i64 %b, 63
%shl = shl i64 %a, %and
%lshr = lshr i64 %a, %and
@ -160,12 +302,58 @@ define amdgpu_ps i64 @s_csh_64(i64 inreg %a, i64 inreg %b) {
ret i64 %ret
}

define amdgpu_ps i64 @s_csh_64_1(i64 inreg %a, i64 inreg %b) {
; CHECK-LABEL: s_csh_64_1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_lshl_b64 s[4:5], s[0:1], s2
; CHECK-NEXT: s_lshr_b64 s[6:7], s[0:1], s2
; CHECK-NEXT: s_ashr_i64 s[0:1], s[0:1], s2
; CHECK-NEXT: s_add_u32 s2, s4, s6
; CHECK-NEXT: s_addc_u32 s3, s5, s7
; CHECK-NEXT: s_add_u32 s0, s2, s0
; CHECK-NEXT: s_addc_u32 s1, s3, s1
; CHECK-NEXT: ; return to shader part epilog
;
; GISEL-LABEL: s_csh_64_1:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_mov_b64 s[4:5], 0xff
; GISEL-NEXT: s_and_b64 s[2:3], s[2:3], s[4:5]
; GISEL-NEXT: s_lshl_b64 s[4:5], s[0:1], s2
; GISEL-NEXT: s_lshr_b64 s[6:7], s[0:1], s2
; GISEL-NEXT: s_ashr_i64 s[0:1], s[0:1], s2
; GISEL-NEXT: s_add_u32 s2, s4, s6
; GISEL-NEXT: s_cselect_b32 s3, 1, 0
; GISEL-NEXT: s_and_b32 s3, s3, 1
; GISEL-NEXT: s_cmp_lg_u32 s3, 0
; GISEL-NEXT: s_addc_u32 s3, s5, s7
; GISEL-NEXT: s_add_u32 s0, s2, s0
; GISEL-NEXT: s_cselect_b32 s2, 1, 0
; GISEL-NEXT: s_and_b32 s2, s2, 1
; GISEL-NEXT: s_cmp_lg_u32 s2, 0
; GISEL-NEXT: s_addc_u32 s1, s3, s1
; GISEL-NEXT: ; return to shader part epilog
%and = and i64 %b, 255
%shl = shl i64 %a, %and
%lshr = lshr i64 %a, %and
%ashr = ashr i64 %a, %and
%ret.0 = add i64 %shl, %lshr
%ret = add i64 %ret.0, %ashr
ret i64 %ret
}

define i32 @cshl_or(i32 %a, i32 %b) {
; CHECK-LABEL: cshl_or:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_lshl_or_b32 v0, v0, v1, v0
; CHECK-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: cshl_or:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_and_b32_e32 v1, 31, v1
; GISEL-NEXT: v_lshl_or_b32 v0, v0, v1, v0
; GISEL-NEXT: s_setpc_b64 s[30:31]
%and = and i32 %b, 31
%shl = shl i32 %a, %and
%or = or i32 %shl, %a
@ -178,6 +366,13 @@ define i32 @cshl_add(i32 %a, i32 %b, i32 %c) {
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_lshl_add_u32 v0, v0, v1, v2
; CHECK-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: cshl_add:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_and_b32_e32 v1, 31, v1
; GISEL-NEXT: v_lshl_add_u32 v0, v0, v1, v2
; GISEL-NEXT: s_setpc_b64 s[30:31]
%and = and i32 %b, 31
%shl = shl i32 %a, %and
%add = add i32 %shl, %c
@ -190,6 +385,13 @@ define i32 @add_cshl(i32 %a, i32 %b) {
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_add_lshl_u32 v0, v0, v1, v1
; CHECK-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: add_cshl:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_and_b32_e32 v2, 31, v1
; GISEL-NEXT: v_add_lshl_u32 v0, v0, v1, v2
; GISEL-NEXT: s_setpc_b64 s[30:31]
%add = add i32 %a, %b
%and = and i32 %b, 31
%shl = shl i32 %add, %and

@ -115,7 +115,6 @@ define i32 @bzhi32_c1_indexzext(i32 %val, i8 %numlowbits) nounwind {
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_sub_i32_e32 v1, vcc, 32, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshr_b32_e32 v1, -1, v1
; SI-NEXT: v_and_b32_e32 v0, v1, v0
; SI-NEXT: s_setpc_b64 s[30:31]
@ -124,8 +123,7 @@ define i32 @bzhi32_c1_indexzext(i32 %val, i8 %numlowbits) nounwind {
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_sub_u16_e32 v1, 32, v1
; VI-NEXT: v_mov_b32_e32 v2, -1
; VI-NEXT: v_lshrrev_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_lshrrev_b32_e64 v1, v1, -1
; VI-NEXT: v_and_b32_e32 v0, v1, v0
; VI-NEXT: s_setpc_b64 s[30:31]
%numhighbits = sub i8 32, %numlowbits
@ -168,7 +166,6 @@ define i32 @bzhi32_d1_indexzext(i32 %val, i8 %numlowbits) nounwind {
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_sub_i32_e32 v1, vcc, 32, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshl_b32_e32 v0, v0, v1
; SI-NEXT: v_lshr_b32_e32 v0, v0, v1
; SI-NEXT: s_setpc_b64 s[30:31]
@ -177,7 +174,6 @@ define i32 @bzhi32_d1_indexzext(i32 %val, i8 %numlowbits) nounwind {
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_sub_u16_e32 v1, 32, v1
; VI-NEXT: v_and_b32_e32 v1, 0xff, v1
; VI-NEXT: v_lshlrev_b32_e32 v0, v1, v0
; VI-NEXT: v_lshrrev_b32_e32 v0, v1, v0
; VI-NEXT: s_setpc_b64 s[30:31]

@ -1530,7 +1530,6 @@ define void @shl_inline_imm_2.0_i16(i16 addrspace(1)* %out, i16 %x) {
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
@ -1566,7 +1565,6 @@ define void @shl_inline_imm_neg_2.0_i16(i16 addrspace(1)* %out, i16 %x) {
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6

@ -20,18 +20,16 @@ define amdgpu_kernel void @s_lshr_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16>
; VI-LABEL: s_lshr_v2i16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
; VI-NEXT: s_load_dword s5, s[0:1], 0x2c
; VI-NEXT: s_load_dword s4, s[0:1], 0x2c
; VI-NEXT: s_load_dword s0, s[0:1], 0x30
; VI-NEXT: s_mov_b32 s4, 0xffff
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: s_lshr_b32 s1, s5, 16
; VI-NEXT: s_lshr_b32 s6, s0, 16
; VI-NEXT: s_and_b32 s5, s5, s4
; VI-NEXT: s_and_b32 s0, s0, s4
; VI-NEXT: s_lshr_b32 s1, s1, s6
; VI-NEXT: s_lshr_b32 s0, s5, s0
; VI-NEXT: s_lshl_b32 s1, s1, 16
; VI-NEXT: s_and_b32 s1, s4, 0xffff
; VI-NEXT: s_lshr_b32 s4, s4, 16
; VI-NEXT: s_lshr_b32 s5, s0, 16
; VI-NEXT: s_lshr_b32 s4, s4, s5
; VI-NEXT: s_lshr_b32 s0, s1, s0
; VI-NEXT: s_lshl_b32 s1, s4, 16
; VI-NEXT: s_or_b32 s0, s0, s1
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: v_mov_b32_e32 v2, s0
@ -43,18 +41,16 @@ define amdgpu_kernel void @s_lshr_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16>
; CI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
; CI-NEXT: s_load_dword s2, s[0:1], 0xb
; CI-NEXT: s_load_dword s0, s[0:1], 0xc
; CI-NEXT: s_mov_b32 s3, 0xffff
; CI-NEXT: s_mov_b32 s7, 0xf000
; CI-NEXT: s_mov_b32 s6, -1
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_lshr_b32 s1, s2, 16
; CI-NEXT: s_lshr_b32 s8, s0, 16
; CI-NEXT: s_and_b32 s2, s2, s3
; CI-NEXT: s_and_b32 s0, s0, s3
; CI-NEXT: s_lshr_b32 s1, s1, s8
; CI-NEXT: s_lshr_b32 s0, s2, s0
; CI-NEXT: s_lshl_b32 s1, s1, 16
; CI-NEXT: s_or_b32 s0, s0, s1
; CI-NEXT: s_and_b32 s1, s2, 0xffff
; CI-NEXT: s_lshr_b32 s2, s2, 16
; CI-NEXT: s_lshr_b32 s3, s0, 16
; CI-NEXT: s_lshr_b32 s2, s2, s3
; CI-NEXT: s_lshl_b32 s2, s2, 16
; CI-NEXT: s_lshr_b32 s0, s1, s0
; CI-NEXT: s_or_b32 s0, s0, s2
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; CI-NEXT: s_endpgm
@ -121,14 +117,12 @@ define amdgpu_kernel void @v_lshr_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16>
; CI-NEXT: s_mov_b64 s[0:1], s[6:7]
; CI-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64
; CI-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4
; CI-NEXT: s_mov_b32 s0, 0xffff
; CI-NEXT: s_mov_b64 s[6:7], s[2:3]
; CI-NEXT: s_waitcnt vmcnt(1)
; CI-NEXT: v_lshrrev_b32_e32 v4, 16, v2
; CI-NEXT: v_and_b32_e32 v2, s0, v2
; CI-NEXT: v_and_b32_e32 v2, 0xffff, v2
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_lshrrev_b32_e32 v5, 16, v3
; CI-NEXT: v_and_b32_e32 v3, s0, v3
; CI-NEXT: v_lshr_b32_e32 v2, v2, v3
; CI-NEXT: v_lshr_b32_e32 v3, v4, v5
; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
@ -206,14 +200,12 @@ define amdgpu_kernel void @lshr_v_s_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16
; CI-NEXT: s_mov_b64 s[0:1], s[6:7]
; CI-NEXT: v_mov_b32_e32 v1, 0
; CI-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64
; CI-NEXT: s_mov_b32 s0, 0xffff
; CI-NEXT: s_lshr_b32 s1, s8, 16
; CI-NEXT: s_and_b32 s8, s8, s0
; CI-NEXT: s_lshr_b32 s0, s8, 16
; CI-NEXT: s_mov_b64 s[6:7], s[2:3]
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; CI-NEXT: v_and_b32_e32 v2, s0, v2
; CI-NEXT: v_lshrrev_b32_e32 v3, s1, v3
; CI-NEXT: v_and_b32_e32 v2, 0xffff, v2
; CI-NEXT: v_lshrrev_b32_e32 v3, s0, v3
; CI-NEXT: v_lshrrev_b32_e32 v2, s8, v2
; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; CI-NEXT: v_or_b32_e32 v2, v2, v3
@ -287,15 +279,13 @@ define amdgpu_kernel void @lshr_s_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16
; CI-NEXT: s_mov_b64 s[0:1], s[6:7]
; CI-NEXT: v_mov_b32_e32 v1, 0
; CI-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64
; CI-NEXT: s_mov_b32 s0, 0xffff
; CI-NEXT: s_lshr_b32 s1, s8, 16
; CI-NEXT: s_and_b32 s8, s8, s0
; CI-NEXT: s_lshr_b32 s0, s8, 16
; CI-NEXT: s_and_b32 s1, s8, 0xffff
; CI-NEXT: s_mov_b64 s[6:7], s[2:3]
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; CI-NEXT: v_and_b32_e32 v2, s0, v2
; CI-NEXT: v_lshr_b32_e32 v3, s1, v3
; CI-NEXT: v_lshr_b32_e32 v2, s8, v2
; CI-NEXT: v_lshr_b32_e32 v3, s0, v3
; CI-NEXT: v_lshr_b32_e32 v2, s1, v2
; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; CI-NEXT: v_or_b32_e32 v2, v2, v3
; CI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
@ -367,7 +357,6 @@ define amdgpu_kernel void @lshr_imm_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i
; CI-NEXT: s_mov_b64 s[6:7], s[2:3]
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; CI-NEXT: v_and_b32_e32 v2, 0xffff, v2
; CI-NEXT: v_lshr_b32_e32 v3, 8, v3
; CI-NEXT: v_lshr_b32_e32 v2, 8, v2
; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
@ -522,9 +511,7 @@ define amdgpu_kernel void @v_lshr_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16>
; CI-NEXT: v_and_b32_e32 v3, s0, v3
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_lshrrev_b32_e32 v8, 16, v4
; CI-NEXT: v_and_b32_e32 v4, s0, v4
; CI-NEXT: v_lshrrev_b32_e32 v9, 16, v5
; CI-NEXT: v_and_b32_e32 v5, s0, v5
; CI-NEXT: v_lshr_b32_e32 v3, v3, v5
; CI-NEXT: v_lshr_b32_e32 v5, v7, v9
; CI-NEXT: v_lshr_b32_e32 v2, v2, v4

@ -233,7 +233,6 @@ define amdgpu_kernel void @shl_i16_v_s(i16 addrspace(1)* %out, i16 addrspace(1)*
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: buffer_load_ushort v0, off, s[4:7], 0
; SI-NEXT: s_and_b32 s8, s8, 0xffff
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, s8, v0
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
@ -253,9 +252,8 @@ define amdgpu_kernel void @shl_i16_v_s(i16 addrspace(1)* %out, i16 addrspace(1)*
; VI-NEXT: s_mov_b32 s6, s2
; VI-NEXT: s_mov_b32 s7, s3
; VI-NEXT: buffer_load_ushort v0, off, s[4:7], 0
; VI-NEXT: s_and_b32 s4, s8, 0xffff
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_lshlrev_b32_e32 v0, s4, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, s8, v0
; VI-NEXT: buffer_store_short v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
@ -309,9 +307,8 @@ define amdgpu_kernel void @shl_i16_v_compute_s(i16 addrspace(1)* %out, i16 addrs
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: buffer_load_ushort v0, off, s[4:7], 0
; SI-NEXT: s_add_i32 s8, s8, 3
; SI-NEXT: s_and_b32 s4, s8, 0xffff
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, s4, v0
; SI-NEXT: v_lshlrev_b32_e32 v0, s8, v0
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
@ -330,9 +327,8 @@ define amdgpu_kernel void @shl_i16_v_compute_s(i16 addrspace(1)* %out, i16 addrs
; VI-NEXT: s_mov_b32 s7, s3
; VI-NEXT: buffer_load_ushort v0, off, s[4:7], 0
; VI-NEXT: s_add_i32 s8, s8, 3
; VI-NEXT: s_and_b32 s4, s8, 0xffff
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_lshlrev_b32_e32 v0, s4, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, s8, v0
; VI-NEXT: buffer_store_short v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
@ -401,7 +397,6 @@ define amdgpu_kernel void @shl_i16_computed_amount(i16 addrspace(1)* %out, i16 a
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshl_b32_e32 v0, v2, v0
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
@ -557,17 +552,15 @@ define amdgpu_kernel void @shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> add
; SI-NEXT: s_mov_b64 s[12:13], s[6:7]
; SI-NEXT: buffer_load_dword v2, off, s[8:11], 0
; SI-NEXT: buffer_load_dword v0, v[0:1], s[12:15], 0 addr64 offset:4
; SI-NEXT: s_mov_b32 s6, 0xffff
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v2
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v0
; SI-NEXT: v_and_b32_e32 v0, s6, v0
; SI-NEXT: v_lshl_b32_e32 v0, v2, v0
; SI-NEXT: v_lshl_b32_e32 v1, v1, v3
; SI-NEXT: v_and_b32_e32 v0, s6, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
@ -660,20 +653,18 @@ define amdgpu_kernel void @shl_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> add
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v2
; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v3
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v8, s0, v4
; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4
; SI-NEXT: v_and_b32_e32 v9, s0, v5
; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5
; SI-NEXT: v_lshl_b32_e32 v5, v7, v5
; SI-NEXT: v_lshl_b32_e32 v3, v3, v9
; SI-NEXT: v_lshl_b32_e32 v4, v6, v4
; SI-NEXT: v_lshl_b32_e32 v2, v2, v8
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v4
; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v5
; SI-NEXT: v_lshl_b32_e32 v3, v3, v5
; SI-NEXT: v_lshl_b32_e32 v2, v2, v4
; SI-NEXT: v_lshl_b32_e32 v4, v7, v9
; SI-NEXT: v_lshl_b32_e32 v5, v6, v8
; SI-NEXT: v_and_b32_e32 v3, s0, v3
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_and_b32_e32 v2, s0, v2
; SI-NEXT: v_or_b32_e32 v3, v3, v5
; SI-NEXT: v_or_b32_e32 v2, v2, v4
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v3, v3, v4
; SI-NEXT: v_or_b32_e32 v2, v2, v5
; SI-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_endpgm
;

@ -23,19 +23,18 @@ define amdgpu_kernel void @s_shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
; VI-NEXT: s_load_dword s2, s[0:1], 0x2c
; VI-NEXT: s_load_dword s0, s[0:1], 0x30
; VI-NEXT: s_mov_b32 s3, 0xffff
; VI-NEXT: s_mov_b32 s1, 0xffff
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s1, s2, 16
; VI-NEXT: s_and_b32 s2, s2, s3
; VI-NEXT: s_and_b32 s3, s2, s1
; VI-NEXT: s_lshr_b32 s2, s2, 16
; VI-NEXT: s_lshr_b32 s8, s0, 16
; VI-NEXT: s_and_b32 s0, s0, s3
; VI-NEXT: s_lshl_b32 s0, s2, s0
; VI-NEXT: s_lshl_b32 s1, s1, s8
; VI-NEXT: s_lshl_b32 s1, s1, 16
; VI-NEXT: s_and_b32 s0, s0, s3
; VI-NEXT: s_or_b32 s0, s0, s1
; VI-NEXT: s_lshl_b32 s2, s2, s8
; VI-NEXT: s_lshl_b32 s0, s3, s0
; VI-NEXT: s_lshl_b32 s2, s2, 16
; VI-NEXT: s_and_b32 s0, s0, s1
; VI-NEXT: s_or_b32 s0, s0, s2
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
@ -45,18 +44,16 @@ define amdgpu_kernel void @s_shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %
; CI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
; CI-NEXT: s_load_dword s2, s[0:1], 0xb
; CI-NEXT: s_load_dword s0, s[0:1], 0xc
; CI-NEXT: s_mov_b32 s3, 0xffff
; CI-NEXT: s_mov_b32 s7, 0xf000
; CI-NEXT: s_mov_b32 s6, -1
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_lshr_b32 s1, s2, 16
; CI-NEXT: s_and_b32 s8, s0, s3
; CI-NEXT: s_lshr_b32 s0, s0, 16
; CI-NEXT: s_lshl_b32 s0, s1, s0
; CI-NEXT: s_lshl_b32 s1, s2, s8
; CI-NEXT: s_lshl_b32 s0, s0, 16
; CI-NEXT: s_and_b32 s1, s1, s3
; CI-NEXT: s_or_b32 s0, s1, s0
; CI-NEXT: s_lshr_b32 s3, s0, 16
; CI-NEXT: s_lshl_b32 s1, s1, s3
; CI-NEXT: s_lshl_b32 s0, s2, s0
; CI-NEXT: s_lshl_b32 s1, s1, 16
; CI-NEXT: s_and_b32 s0, s0, 0xffff
; CI-NEXT: s_or_b32 s0, s0, s1
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; CI-NEXT: s_endpgm
@ -124,17 +121,15 @@ define amdgpu_kernel void @v_shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> a
; CI-NEXT: s_mov_b64 s[0:1], s[6:7]
; CI-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64
; CI-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4
; CI-NEXT: s_mov_b32 s0, 0xffff
; CI-NEXT: s_mov_b64 s[6:7], s[2:3]
; CI-NEXT: s_waitcnt vmcnt(1)
; CI-NEXT: v_lshrrev_b32_e32 v4, 16, v2
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_and_b32_e32 v5, s0, v3
; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; CI-NEXT: v_lshl_b32_e32 v3, v4, v3
; CI-NEXT: v_lshl_b32_e32 v2, v2, v5
; CI-NEXT: v_lshrrev_b32_e32 v5, 16, v3
; CI-NEXT: v_lshl_b32_e32 v2, v2, v3
; CI-NEXT: v_lshl_b32_e32 v3, v4, v5
; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; CI-NEXT: v_and_b32_e32 v2, s0, v2
; CI-NEXT: v_and_b32_e32 v2, 0xffff, v2
; CI-NEXT: v_or_b32_e32 v2, v2, v3
; CI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
; CI-NEXT: s_endpgm
@ -209,15 +204,13 @@ define amdgpu_kernel void @shl_v_s_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16>
; CI-NEXT: s_mov_b64 s[0:1], s[6:7]
; CI-NEXT: v_mov_b32_e32 v1, 0
; CI-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64
; CI-NEXT: s_mov_b32 s0, 0xffff
; CI-NEXT: s_lshr_b32 s1, s8, 16
; CI-NEXT: s_and_b32 s8, s8, s0
; CI-NEXT: s_lshr_b32 s0, s8, 16
; CI-NEXT: s_mov_b64 s[6:7], s[2:3]
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; CI-NEXT: v_lshlrev_b32_e32 v2, s8, v2
; CI-NEXT: v_lshlrev_b32_e32 v3, s1, v3
; CI-NEXT: v_and_b32_e32 v2, s0, v2
; CI-NEXT: v_lshlrev_b32_e32 v3, s0, v3
; CI-NEXT: v_and_b32_e32 v2, 0xffff, v2
; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; CI-NEXT: v_or_b32_e32 v2, v2, v3
; CI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
@ -290,17 +283,15 @@ define amdgpu_kernel void @shl_s_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16>
; CI-NEXT: s_mov_b64 s[0:1], s[6:7]
; CI-NEXT: v_mov_b32_e32 v1, 0
; CI-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64
; CI-NEXT: s_mov_b32 s0, 0xffff
; CI-NEXT: s_lshr_b32 s1, s8, 16
; CI-NEXT: s_lshr_b32 s0, s8, 16
; CI-NEXT: s_mov_b64 s[6:7], s[2:3]
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_and_b32_e32 v3, s0, v2
; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; CI-NEXT: v_lshl_b32_e32 v2, s1, v2
; CI-NEXT: v_lshl_b32_e32 v3, s8, v3
; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; CI-NEXT: v_and_b32_e32 v3, s0, v3
; CI-NEXT: v_or_b32_e32 v2, v3, v2
; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; CI-NEXT: v_lshl_b32_e32 v2, s8, v2
; CI-NEXT: v_lshl_b32_e32 v3, s0, v3
; CI-NEXT: v_and_b32_e32 v2, 0xffff, v2
; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; CI-NEXT: v_or_b32_e32 v2, v2, v3
; CI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
; CI-NEXT: s_endpgm
;
@ -369,13 +360,12 @@ define amdgpu_kernel void @shl_imm_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i1
; CI-NEXT: buffer_load_dword v2, v[0:1], s[0:3], 0 addr64
; CI-NEXT: s_mov_b64 s[6:7], s[2:3]
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_and_b32_e32 v3, 0xffff, v2
; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; CI-NEXT: v_lshl_b32_e32 v2, 8, v2
; CI-NEXT: v_lshl_b32_e32 v3, 8, v3
; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; CI-NEXT: v_and_b32_e32 v3, 0xfff8, v3
; CI-NEXT: v_or_b32_e32 v2, v3, v2
; CI-NEXT: v_and_b32_e32 v2, 0xfff8, v2
; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; CI-NEXT: v_or_b32_e32 v2, v2, v3
; CI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
; CI-NEXT: s_endpgm
;
@ -524,20 +514,18 @@ define amdgpu_kernel void @v_shl_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> a
; CI-NEXT: v_lshrrev_b32_e32 v6, 16, v2
; CI-NEXT: v_lshrrev_b32_e32 v7, 16, v3
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: v_and_b32_e32 v8, s0, v4
; CI-NEXT: v_lshrrev_b32_e32 v4, 16, v4
; CI-NEXT: v_and_b32_e32 v9, s0, v5
; CI-NEXT: v_lshrrev_b32_e32 v5, 16, v5
; CI-NEXT: v_lshl_b32_e32 v5, v7, v5
; CI-NEXT: v_lshl_b32_e32 v3, v3, v9
; CI-NEXT: v_lshl_b32_e32 v4, v6, v4
; CI-NEXT: v_lshl_b32_e32 v2, v2, v8
; CI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; CI-NEXT: v_lshrrev_b32_e32 v8, 16, v4
; CI-NEXT: v_lshrrev_b32_e32 v9, 16, v5
; CI-NEXT: v_lshl_b32_e32 v3, v3, v5
; CI-NEXT: v_lshl_b32_e32 v2, v2, v4
; CI-NEXT: v_lshl_b32_e32 v4, v7, v9
; CI-NEXT: v_lshl_b32_e32 v5, v6, v8
; CI-NEXT: v_and_b32_e32 v3, s0, v3
; CI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; CI-NEXT: v_and_b32_e32 v2, s0, v2
; CI-NEXT: v_or_b32_e32 v3, v3, v5
; CI-NEXT: v_or_b32_e32 v2, v2, v4
; CI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; CI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; CI-NEXT: v_or_b32_e32 v3, v3, v4
; CI-NEXT: v_or_b32_e32 v2, v2, v5
; CI-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
; CI-NEXT: s_endpgm
;

@ -157,18 +157,16 @@ define amdgpu_kernel void @ashr_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> ad
; SI-NEXT: s_mov_b32 s8, s6
; SI-NEXT: s_mov_b32 s9, s7
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
; SI-NEXT: s_mov_b32 s6, 0xffff
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_bfe_i32 v2, v0, 0, 16
; SI-NEXT: v_ashrrev_i32_e32 v0, 16, v0
; SI-NEXT: v_and_b32_e32 v3, s6, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_ashrrev_i32_e32 v0, v1, v0
; SI-NEXT: v_ashrrev_i32_e32 v1, v3, v2
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1
; SI-NEXT: v_ashrrev_i32_e32 v0, v3, v0
; SI-NEXT: v_ashrrev_i32_e32 v1, v1, v2
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: v_and_b32_e32 v1, s6, v1
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
@ -253,14 +251,12 @@ define amdgpu_kernel void @ashr_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> ad
; SI-NEXT: v_ashrrev_i32_e32 v0, 16, v0
; SI-NEXT: v_bfe_i32 v5, v1, 0, 16
; SI-NEXT: v_ashrrev_i32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v6, s6, v2
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; SI-NEXT: v_and_b32_e32 v7, s6, v3
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; SI-NEXT: v_ashrrev_i32_e32 v1, v3, v1
; SI-NEXT: v_ashrrev_i32_e32 v3, v7, v5
; SI-NEXT: v_ashrrev_i32_e32 v0, v2, v0
; SI-NEXT: v_ashrrev_i32_e32 v2, v6, v4
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v2
; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v3
; SI-NEXT: v_ashrrev_i32_e32 v1, v7, v1
; SI-NEXT: v_ashrrev_i32_e32 v3, v3, v5
; SI-NEXT: v_ashrrev_i32_e32 v0, v6, v0
; SI-NEXT: v_ashrrev_i32_e32 v2, v2, v4
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v3, s6, v3
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0