[AMDGPU] Fix crash with sgpr spills to vgpr disabled

The backend would hit an assertion with amdgpu-spill-sgpr-to-vgpr disabled
when it tried to spill the FP.
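
To make the failure mode concrete, here is a minimal standalone C++ sketch of
the decision involved, assuming the FP save can either use a VGPR lane or a
regular stack slot. The SpillContext/chooseFPSave names below are invented for
illustration and are not LLVM APIs.

#include <cassert>
#include <cstdio>

// Stand-ins for the state the prolog/epilog code consults; these types and
// names are invented for this sketch and are not part of LLVM.
struct SpillContext {
  bool SpillSGPRToVGPR;  // mirrors the -amdgpu-spill-sgpr-to-vgpr setting
  bool HaveFreeVGPRLane; // whether a VGPR lane could be allocated for the SGPR
};

enum class FPSave { VGPRLane, MemorySpill };

// Guarded decision: only take the VGPR-lane path when the flag allows it;
// otherwise fall back to saving the FP through a stack slot.
static FPSave chooseFPSave(const SpillContext &C) {
  if (C.SpillSGPRToVGPR && C.HaveFreeVGPRLane)
    return FPSave::VGPRLane;
  return FPSave::MemorySpill;
}

int main() {
  SpillContext Disabled{/*SpillSGPRToVGPR=*/false, /*HaveFreeVGPRLane=*/true};
  assert(chooseFPSave(Disabled) == FPSave::MemorySpill);
  std::puts("flag off -> FP is saved through the memory-spill fallback");
  return 0;
}

The test added in this commit checks the same behaviour end to end with llc.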

Fixes: SWDEV-262704

Reviewed By: RamNalamothu

Differential Revision: https://reviews.llvm.org/D95768
Austin Kerbow 2021-02-01 08:34:57 -08:00
parent 5f3c4923e4
commit 0397dca021
3 changed files with 35 additions and 3 deletions

@@ -105,7 +105,7 @@ static void getVGPRSpillLaneOrTempRegister(MachineFunction &MF,
     int NewFI = FrameInfo.CreateStackObject(4, Align(4), true, nullptr,
                                             TargetStackID::SGPRSpill);
-    if (MFI->allocateSGPRSpillToVGPR(MF, NewFI)) {
+    if (TRI->spillSGPRToVGPR() && MFI->allocateSGPRSpillToVGPR(MF, NewFI)) {
       // 3: There's no free lane to spill, and no free register to save FP/BP,
       // so we're forced to spill another VGPR to use for the spill.
       FrameIndex = NewFI;
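
In getVGPRSpillLaneOrTempRegister, the added TRI->spillSGPRToVGPR() check keeps the VGPR-lane reservation from being attempted when SGPR-to-VGPR spilling is disabled; the FP/BP save then takes the memory fallback that the NO-SPILL-TO-VGPR checks in the new test exercise.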

@@ -11487,8 +11487,9 @@ void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
   // Allocate a VGPR for future SGPR Spill if
   // "amdgpu-reserve-vgpr-for-sgpr-spill" option is used
   // FIXME: We won't need this hack if we split SGPR allocation from VGPR
-  if (VGPRReserveforSGPRSpill && !Info->VGPRReservedForSGPRSpill &&
-      !Info->isEntryFunction() && MF.getFrameInfo().hasStackObjects())
+  if (VGPRReserveforSGPRSpill && TRI->spillSGPRToVGPR() &&
+      !Info->VGPRReservedForSGPRSpill && !Info->isEntryFunction() &&
+      MF.getFrameInfo().hasStackObjects())
     Info->reserveVGPRforSGPRSpills(MF);
 }
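
finalizeLowering gets the same guard: a VGPR is reserved for future SGPR spills only when the amdgpu-reserve-vgpr-for-sgpr-spill option is used and SGPR-to-VGPR spilling is actually enabled.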

@@ -0,0 +1,31 @@
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -amdgpu-spill-sgpr-to-vgpr=true < %s | FileCheck -check-prefixes=GCN,SPILL-TO-VGPR %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -amdgpu-spill-sgpr-to-vgpr=false < %s | FileCheck -check-prefixes=GCN,NO-SPILL-TO-VGPR %s
+
+; Check frame setup where SGPR spills to VGPRs are disabled or enabled.
+
+declare hidden void @external_void_func_void() #0
+
+; GCN-LABEL: {{^}}callee_with_stack_and_call:
+; SPILL-TO-VGPR: buffer_store_dword v40, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; SPILL-TO-VGPR: v_writelane_b32 v40, s33, 2
+; NO-SPILL-TO-VGPR: v_mov_b32_e32 v0, s33
+; NO-SPILL-TO-VGPR: buffer_store_dword v0, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GCN: s_swappc_b64 s[30:31], s[4:5]
+; SPILL-TO-VGPR: v_readlane_b32 s4, v40, 0
+; SPILL-TO-VGPR: v_readlane_b32 s5, v40, 1
+; NO-SPILL-TO-VGPR: v_readlane_b32 s4, v1, 0
+; NO-SPILL-TO-VGPR: v_readlane_b32 s5, v1, 1
+; SPILL-TO-VGPR: v_readlane_b32 s33, v40, 2
+; NO-SPILL-TO-VGPR: buffer_load_dword v0, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; NO-SPILL-TO-VGPR: v_readfirstlane_b32 s33, v0
+define void @callee_with_stack_and_call() #0 {
+  %alloca = alloca i32, addrspace(5)
+  store volatile i32 0, i32 addrspace(5)* %alloca
+  call void @external_void_func_void()
+  ret void
+}
+
+attributes #0 = { nounwind }