From d3a8e333ec9db769b0335cd72ed6acf0d3d0b2ba Mon Sep 17 00:00:00 2001
From: Jay Foad
Date: Mon, 28 Sep 2020 14:34:23 +0100
Subject: [PATCH] [AMDGPU] Reformat SITargetLowering::isSDNodeSourceOfDivergence. NFC.

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 66 +++++++++++------------
 1 file changed, 30 insertions(+), 36 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index e3646d329118..b7b884544619 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -11675,46 +11675,40 @@ static bool isCopyFromRegOfInlineAsm(const SDNode *N) {
   return false;
 }
 
-bool SITargetLowering::isSDNodeSourceOfDivergence(const SDNode * N,
-  FunctionLoweringInfo * FLI, LegacyDivergenceAnalysis * KDA) const
-{
+bool SITargetLowering::isSDNodeSourceOfDivergence(
+    const SDNode *N, FunctionLoweringInfo *FLI,
+    LegacyDivergenceAnalysis *KDA) const {
   switch (N->getOpcode()) {
-    case ISD::CopyFromReg:
-    {
-      const RegisterSDNode *R = cast<RegisterSDNode>(N->getOperand(1));
-      const MachineRegisterInfo &MRI = FLI->MF->getRegInfo();
-      const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
-      Register Reg = R->getReg();
+  case ISD::CopyFromReg: {
+    const RegisterSDNode *R = cast<RegisterSDNode>(N->getOperand(1));
+    const MachineRegisterInfo &MRI = FLI->MF->getRegInfo();
+    const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
+    Register Reg = R->getReg();
 
-      // FIXME: Why does this need to consider isLiveIn?
-      if (Reg.isPhysical() || MRI.isLiveIn(Reg))
-        return !TRI->isSGPRReg(MRI, Reg);
-
-      if (const Value *V = FLI->getValueFromVirtualReg(R->getReg()))
-        return KDA->isDivergent(V);
-
-      assert(Reg == FLI->DemoteRegister || isCopyFromRegOfInlineAsm(N));
+    // FIXME: Why does this need to consider isLiveIn?
+    if (Reg.isPhysical() || MRI.isLiveIn(Reg))
       return !TRI->isSGPRReg(MRI, Reg);
-    }
-    break;
-    case ISD::LOAD: {
-      const LoadSDNode *L = cast<LoadSDNode>(N);
-      unsigned AS = L->getAddressSpace();
-      // A flat load may access private memory.
-      return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS;
-    } break;
-    case ISD::CALLSEQ_END:
-      return true;
-      break;
-    case ISD::INTRINSIC_WO_CHAIN:
-    {
-    }
-      return AMDGPU::isIntrinsicSourceOfDivergence(
-      cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
-    case ISD::INTRINSIC_W_CHAIN:
-      return AMDGPU::isIntrinsicSourceOfDivergence(
-      cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
+    if (const Value *V = FLI->getValueFromVirtualReg(R->getReg()))
+      return KDA->isDivergent(V);
+
+    assert(Reg == FLI->DemoteRegister || isCopyFromRegOfInlineAsm(N));
+    return !TRI->isSGPRReg(MRI, Reg);
+  }
+  case ISD::LOAD: {
+    const LoadSDNode *L = cast<LoadSDNode>(N);
+    unsigned AS = L->getAddressSpace();
+    // A flat load may access private memory.
+    return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS;
+  }
+  case ISD::CALLSEQ_END:
+    return true;
+  case ISD::INTRINSIC_WO_CHAIN:
+    return AMDGPU::isIntrinsicSourceOfDivergence(
+        cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
+  case ISD::INTRINSIC_W_CHAIN:
+    return AMDGPU::isIntrinsicSourceOfDivergence(
+        cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
   }
   return false;
 }