AMDGPU/SI: Don't use non-0 waitcnt values when waiting on Flat instructions

Summary:
Flat instructions can return out of order, so we always need to wait
for all outstanding flat operations.
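
To illustrate the reasoning, here is a small standalone sketch (not the pass itself; CounterState and requiredWait are invented for this example): when results come back in issue order, waiting for a non-zero count is enough to know the oldest result has landed, but once an out-of-order producer such as a flat access is outstanding, the only safe wait value is zero.

// Simplified model, independent of the LLVM sources below.
#include <cstdio>

struct CounterState {
  unsigned Issued;      // loads issued and still counted
  unsigned StillNeeded; // 1-based index of the newest load whose result we need
  bool Ordered;         // do results return in issue order?
};

// Wait-count immediate we could safely use for this counter.
unsigned requiredWait(const CounterState &C) {
  // Out-of-order results (flat): the load we need may be the last to land,
  // so everything must drain.
  if (!C.Ordered)
    return 0;
  // In-order results: once only (Issued - StillNeeded) ops remain
  // outstanding, the one we need has completed.
  return C.Issued - C.StillNeeded;
}

int main() {
  CounterState GlobalOnly = {2, 1, true};  // two global loads, need the first
  CounterState FlatSeen   = {2, 1, false}; // same, but a flat access is pending
  std::printf("global only: vmcnt(%u)\n", requiredWait(GlobalOnly)); // vmcnt(1)
  std::printf("flat seen:   vmcnt(%u)\n", requiredWait(FlatSeen));   // vmcnt(0)
}

This mirrors what the change below does: once a flat operation is in flight, VM_CNT can no longer be treated as an in-order counter (Ordered[0] = !IsFlatOutstanding), so the pass falls back to waiting for vmcnt(0).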

Reviewers: tony-tye, arsenm

Subscribers: kzhuravl, wdng, nhaehnle, llvm-commits, yaxunl

Differential Revision: https://reviews.llvm.org/D25998

llvm-svn: 285479
Tom Stellard 2016-10-28 23:53:48 +00:00
parent 2678d023ca
commit 6695ba0440
4 changed files with 86 additions and 2 deletions


@@ -93,6 +93,9 @@ private:
   bool LastInstWritesM0;
+  /// Whether or not we have flat operations outstanding.
+  bool IsFlatOutstanding;
   /// \brief Whether the machine function returns void
   bool ReturnsVoid;
@@ -294,6 +297,9 @@ void SIInsertWaits::pushInstruction(MachineBasicBlock &MBB,
   Counters Limit = ZeroCounts;
   unsigned Sum = 0;
+  if (TII->mayAccessFlatAddressSpace(*I))
+    IsFlatOutstanding = true;
   for (unsigned i = 0; i < 3; ++i) {
     LastIssued.Array[i] += Increment.Array[i];
     if (Increment.Array[i])
@@ -368,8 +374,9 @@ bool SIInsertWaits::insertWait(MachineBasicBlock &MBB,
   // Figure out if the async instructions execute in order
   bool Ordered[3];
-  // VM_CNT is always ordered
-  Ordered[0] = true;
+  // VM_CNT is always ordered except when there are flat instructions, which
+  // can return out of order.
+  Ordered[0] = !IsFlatOutstanding;
   // EXP_CNT is unordered if we have both EXP & VM-writes
   Ordered[1] = ExpInstrTypesSeen == 3;
@@ -419,6 +426,7 @@ bool SIInsertWaits::insertWait(MachineBasicBlock &MBB,
     LastOpcodeType = OTHER;
     LastInstWritesM0 = false;
+    IsFlatOutstanding = false;
     return true;
   }
@@ -532,6 +540,7 @@ bool SIInsertWaits::runOnMachineFunction(MachineFunction &MF) {
   LastIssued = ZeroCounts;
   LastOpcodeType = OTHER;
   LastInstWritesM0 = false;
+  IsFlatOutstanding = false;
   ReturnsVoid = MF.getInfo<SIMachineFunctionInfo>()->returnsVoid();
   memset(&UsedRegs, 0, sizeof(UsedRegs));


@@ -3540,6 +3540,20 @@ unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
   }
 }
+bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const {
+  if (!isFLAT(MI))
+    return false;
+  if (MI.memoperands_empty())
+    return true;
+  for (const MachineMemOperand *MMO : MI.memoperands()) {
+    if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS)
+      return true;
+  }
+  return false;
+}
 ArrayRef<std::pair<int, const char *>>
 SIInstrInfo::getSerializableTargetIndices() const {
   static const std::pair<int, const char *> TargetIndices[] = {


@@ -617,6 +617,8 @@ public:
   unsigned getInstSizeInBytes(const MachineInstr &MI) const override;
+  bool mayAccessFlatAddressSpace(const MachineInstr &MI) const;
   ArrayRef<std::pair<int, const char *>>
   getSerializableTargetIndices() const override;


@@ -0,0 +1,59 @@
# RUN: llc -march=amdgcn -mcpu=fiji -run-pass si-insert-waits %s -o - | FileCheck %s
--- |
  define void @flat_zero_waitcnt(i32 addrspace(1)* %global4,
                                 <4 x i32> addrspace(1)* %global16,
                                 i32 addrspace(4)* %flat4,
                                 <4 x i32> addrspace(4)* %flat16) {
    ret void
  }
...
---
# CHECK-LABEL: name: flat_zero_waitcnt
# CHECK-LABEL: bb.0:
# CHECK: FLAT_LOAD_DWORD
# CHECK: FLAT_LOAD_DWORDX4
# Global loads will return in order, so we should get:
# s_waitcnt vmcnt(1) lgkmcnt(0)
# CHECK-NEXT: S_WAITCNT 113
# CHECK-LABEL: bb.1:
# CHECK: FLAT_LOAD_DWORD
# CHECK: FLAT_LOAD_DWORDX4
# The first load has no mem operand, so we should assume it accesses the flat
# address space.
# s_waitcnt vmcnt(0) lgkmcnt(0)
# CHECK-NEXT: S_WAITCNT 112
# CHECK-LABEL: bb.2:
# CHECK: FLAT_LOAD_DWORD
# CHECK: FLAT_LOAD_DWORDX4
# One of the outstanding loads accesses the flat address space.
# s_waitcnt vmcnt(0) lgkmcnt(0)
# CHECK-NEXT: S_WAITCNT 112
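# For reference, the S_WAITCNT immediates above decode as follows, assuming
# the layout this pass emits (vmcnt = imm[3:0], expcnt = imm[6:4],
# lgkmcnt = imm[11:8]):
#   113 = 0x71 -> vmcnt(1) expcnt(7) lgkmcnt(0)
#   112 = 0x70 -> vmcnt(0) expcnt(7) lgkmcnt(0)
# expcnt(7) is the counter's maximum, i.e. no wait on the EXP counter.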
name: flat_zero_waitcnt
body: |
  bb.0:
    successors: %bb.1
    %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.global4)
    %vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 16 from %ir.global16)
    %vgpr0 = V_MOV_B32_e32 %vgpr1, implicit %exec
    S_BRANCH %bb.1
  bb.1:
    successors: %bb.2
    %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr
    %vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 16 from %ir.global16)
    %vgpr0 = V_MOV_B32_e32 %vgpr1, implicit %exec
    S_BRANCH %bb.2
  bb.2:
    %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.flat4)
    %vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 16 from %ir.flat16)
    %vgpr0 = V_MOV_B32_e32 %vgpr1, implicit %exec
    S_ENDPGM
...