From 201f892b3b597f24287ab6a712a286e25a45a7d9 Mon Sep 17 00:00:00 2001
From: Alexander Timofeev <Alexander.Timofeev@amd.com>
Date: Thu, 30 Aug 2018 13:55:04 +0000
Subject: [PATCH] [AMDGPU] Preliminary patch for divergence driven instruction
 selection. Operands Folding 1.

Reviewers: rampitec

Differential revision: https://reviews.llvm.org/D51316

llvm-svn: 341068
---
 llvm/lib/Target/AMDGPU/SIFoldOperands.cpp  | 27 +++++++++++++++++++++++--
 llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir | 22 ++++++++++++++++++++
 2 files changed, 47 insertions(+), 2 deletions(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir

diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index eed9a4d43f67..d4b64ab203a6 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -438,8 +438,6 @@ void SIFoldOperands::foldOperand(
 
   bool FoldingImm = OpToFold.isImm();
 
-  // In order to fold immediates into copies, we need to change the
-  // copy to a MOV.
   if (FoldingImm && UseMI->isCopy()) {
     unsigned DestReg = UseMI->getOperand(0).getReg();
     const TargetRegisterClass *DestRC
@@ -447,6 +445,31 @@ void SIFoldOperands::foldOperand(
       = TargetRegisterInfo::isVirtualRegister(DestReg) ?
       MRI->getRegClass(DestReg) :
       TRI->getPhysRegClass(DestReg);
+    unsigned SrcReg = UseMI->getOperand(1).getReg();
+    if (TargetRegisterInfo::isVirtualRegister(DestReg) &&
+        TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+      const TargetRegisterClass * SrcRC = MRI->getRegClass(SrcReg);
+      if (TRI->isSGPRClass(SrcRC) && TRI->hasVGPRs(DestRC)) {
+        MachineRegisterInfo::use_iterator NextUse;
+        SmallVector<FoldCandidate, 4> CopyUses;
+        for (MachineRegisterInfo::use_iterator
+          Use = MRI->use_begin(DestReg), E = MRI->use_end();
+          Use != E; Use = NextUse) {
+          NextUse = std::next(Use);
+          FoldCandidate FC = FoldCandidate(Use->getParent(),
+           Use.getOperandNo(), &UseMI->getOperand(1));
+          CopyUses.push_back(FC);
+        }
+        for (auto & F : CopyUses) {
+          foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo,
+           FoldList, CopiesToReplace);
+        }
+      }
+    }
+
+    // In order to fold immediates into copies, we need to change the
+    // copy to a MOV.
+
     unsigned MovOp = TII->getMovOpcode(DestRC);
     if (MovOp == AMDGPU::COPY)
       return;
diff --git a/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir b/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
new file mode 100644
index 000000000000..bcdf466a382a
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
@@ -0,0 +1,22 @@
+# RUN: llc -march=amdgcn -run-pass si-fold-operands %s -o - | FileCheck -check-prefix=GCN %s
+
+# GCN-LABEL: name: fold-imm-copy
+# GCN: [[SREG:%[0-9+]]]:sreg_32_xm0 = S_MOV_B32 65535
+# GCN: V_AND_B32_e32 [[SREG]]
+
+name: fold-imm-copy
+body: |
+  bb.0:
+    liveins: $vgpr0, $sgpr0_sgpr1
+    %0:vgpr_32 = COPY $vgpr0
+    %1:sgpr_64 = COPY $sgpr0_sgpr1
+    %2:sreg_128 = S_LOAD_DWORDX4_IMM %1, 9, 0
+    %3:sreg_32_xm0 = S_MOV_B32 2
+    %4:vgpr_32 = V_LSHLREV_B32_e64 killed %3, %0, implicit $exec
+    %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    %6:vreg_64 = REG_SEQUENCE killed %4, %subreg.sub0, killed %5, %subreg.sub1
+    %7:vgpr_32 = BUFFER_LOAD_DWORD_ADDR64 %6, %2, 0, 4, 0, 0, 0, implicit $exec
+    %8:sreg_32_xm0 = S_MOV_B32 65535
+    %9:vgpr_32 = COPY %8
+    %10:vgpr_32 = V_AND_B32_e32 %7, %9, implicit $exec
+...