[AMDGPU] Fix for folding v2.16 literals.

It was found that some packed immediate operands (e.g. `<half 1.0, half 2.0>`) were
incorrectly processed, so one of the two packed values was lost.

Introduced a new function to check whether an immediate 32-bit operand can be folded.
Converted the check of the current op_sel flags value into a fall-through condition.

Fixes: SWDEV-247595

Reviewed By: rampitec

Differential Revision: https://reviews.llvm.org/D87158
This commit is contained in:
dfukalov 2020-09-04 22:44:01 +03:00
parent 8b7c8f2c54
commit c259d3a061
4 changed files with 40 additions and 24 deletions

View File

@ -192,8 +192,8 @@ static bool updateOperand(FoldCandidate &Fold,
if (Fold.isImm()) {
if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
!(MI->getDesc().TSFlags & SIInstrFlags::IsMAI) &&
AMDGPU::isInlinableLiteralV216(static_cast<uint16_t>(Fold.ImmToFold),
ST.hasInv2PiInlineImm())) {
AMDGPU::isFoldableLiteralV216(Fold.ImmToFold,
ST.hasInv2PiInlineImm())) {
// Set op_sel/op_sel_hi on this operand or bail out if op_sel is
// already set.
unsigned Opcode = MI->getOpcode();
@ -209,30 +209,30 @@ static bool updateOperand(FoldCandidate &Fold,
ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
MachineOperand &Mod = MI->getOperand(ModIdx);
unsigned Val = Mod.getImm();
if ((Val & SISrcMods::OP_SEL_0) || !(Val & SISrcMods::OP_SEL_1))
return false;
// Only apply the following transformation if that operand requries
// a packed immediate.
switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
case AMDGPU::OPERAND_REG_IMM_V2FP16:
case AMDGPU::OPERAND_REG_IMM_V2INT16:
case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
// If upper part is all zero we do not need op_sel_hi.
if (!isUInt<16>(Fold.ImmToFold)) {
if (!(Fold.ImmToFold & 0xffff)) {
Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
if (!(Val & SISrcMods::OP_SEL_0) && (Val & SISrcMods::OP_SEL_1)) {
// Only apply the following transformation if that operand requries
// a packed immediate.
switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
case AMDGPU::OPERAND_REG_IMM_V2FP16:
case AMDGPU::OPERAND_REG_IMM_V2INT16:
case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
// If upper part is all zero we do not need op_sel_hi.
if (!isUInt<16>(Fold.ImmToFold)) {
if (!(Fold.ImmToFold & 0xffff)) {
Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
return true;
}
Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
return true;
}
Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
return true;
break;
default:
break;
}
break;
default:
break;
}
}
}

View File

@ -1380,6 +1380,19 @@ bool isInlinableIntLiteralV216(int32_t Literal) {
return Lo16 == Hi16 && isInlinableIntLiteral(Lo16);
}
/// Returns true if a 32-bit \p Literal can be folded into a packed
/// v2i16/v2f16 operand: either it fits in 16 bits (sign- or zero-extended),
/// its low half is zero (the high half can then be selected via op_sel), or
/// both 16-bit halves are identical.
bool isFoldableLiteralV216(int32_t Literal, bool HasInv2Pi) {
  assert(HasInv2Pi);

  // A literal representable in 16 bits folds directly.
  if (isInt<16>(Literal) || isUInt<16>(Literal))
    return true;

  const uint32_t Bits = static_cast<uint32_t>(Literal);

  // Low half all zero: foldable by selecting only the high half.
  if ((Bits & 0xffffu) == 0)
    return true;

  // Otherwise the two halves must match (a replicated 16-bit value).
  return (Bits & 0xffffu) == (Bits >> 16);
}
bool isArgPassedInSGPR(const Argument *A) {
const Function *F = A->getParent();

View File

@ -693,6 +693,9 @@ bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi);
LLVM_READNONE
bool isInlinableIntLiteralV216(int32_t Literal);
LLVM_READNONE
bool isFoldableLiteralV216(int32_t Literal, bool HasInv2Pi);
bool isArgPassedInSGPR(const Argument *Arg);
LLVM_READONLY

View File

@ -1026,7 +1026,7 @@ define amdgpu_kernel void @v_test_v2i16_x_sub_7_64(<2 x i16> addrspace(1)* %out,
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_load_dword v1, v0, s[2:3]
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_pk_sub_i16 v1, v1, 7 op_sel_hi:[1,0]
; GFX10-NEXT: v_pk_sub_i16 v1, v1, 0x400007
; GFX10-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@ -1100,7 +1100,7 @@ define amdgpu_kernel void @v_test_v2i16_x_sub_64_123(<2 x i16> addrspace(1)* %ou
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_load_dword v1, v0, s[2:3]
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_pk_sub_i16 v1, v1, 64 op_sel_hi:[1,0]
; GFX10-NEXT: v_pk_sub_i16 v1, v1, 0x7b0040
; GFX10-NEXT: global_store_dword v0, v1, s[0:1]
; GFX10-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()