2018-06-27 13:36:03 +08:00
|
|
|
//===-- AMDGPUAsmPrinter.cpp - AMDGPU assembly printer -------------------===//
|
2012-12-12 05:25:42 +08:00
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
/// \file
|
|
|
|
///
|
|
|
|
/// The AMDGPUAsmPrinter is used to print both assembly string and also binary
|
|
|
|
/// code. When passed an MCAsmStreamer it prints assembly and when passed
|
|
|
|
/// an MCObjectStreamer it outputs binary code.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
|
|
|
|
#include "AMDGPUAsmPrinter.h"
|
|
|
|
#include "AMDGPU.h"
|
2014-06-13 09:32:00 +08:00
|
|
|
#include "AMDGPUSubtarget.h"
|
2017-06-06 19:49:48 +08:00
|
|
|
#include "AMDGPUTargetMachine.h"
|
|
|
|
#include "InstPrinter/AMDGPUInstPrinter.h"
|
AMDGPU: Remove #include "MCTargetDesc/AMDGPUMCTargetDesc.h" from common headers
Summary:
MCTargetDesc/AMDGPUMCTargetDesc.h contains enums for all the instruction
and register definitions, which are huge so we only want to include
them where needed.
This will also make it easier if we want to split the R600 and GCN
definitions into separate tablegenerated files.
I was unable to remove AMDGPUMCTargetDesc.h from SIMachineFunctionInfo.h
because it uses some enums from the header to initialize default values
for the SIMachineFunction class, so I ended up having to remove includes of
SIMachineFunctionInfo.h from headers too.
Reviewers: arsenm, nhaehnle
Reviewed By: nhaehnle
Subscribers: MatzeB, kzhuravl, wdng, yaxunl, dstuttard, tpr, t-tye, javed.absar, llvm-commits
Differential Revision: https://reviews.llvm.org/D46272
llvm-svn: 332930
2018-05-22 10:03:23 +08:00
|
|
|
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
|
2017-06-06 19:49:48 +08:00
|
|
|
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
|
2018-05-25 04:02:01 +08:00
|
|
|
#include "R600AsmPrinter.h"
|
2013-05-07 01:50:51 +08:00
|
|
|
#include "R600Defines.h"
|
2013-04-24 01:34:12 +08:00
|
|
|
#include "R600MachineFunctionInfo.h"
|
2013-04-17 23:17:25 +08:00
|
|
|
#include "R600RegisterInfo.h"
|
2013-05-24 01:10:37 +08:00
|
|
|
#include "SIDefines.h"
|
2016-06-21 01:51:32 +08:00
|
|
|
#include "SIInstrInfo.h"
|
2017-06-06 19:49:48 +08:00
|
|
|
#include "SIMachineFunctionInfo.h"
|
2013-05-24 01:10:37 +08:00
|
|
|
#include "SIRegisterInfo.h"
|
2017-06-06 19:49:48 +08:00
|
|
|
#include "Utils/AMDGPUBaseInfo.h"
|
2017-06-07 11:48:56 +08:00
|
|
|
#include "llvm/BinaryFormat/ELF.h"
|
2014-07-21 23:45:01 +08:00
|
|
|
#include "llvm/CodeGen/MachineFrameInfo.h"
|
2016-06-21 02:13:04 +08:00
|
|
|
#include "llvm/IR/DiagnosticInfo.h"
|
2013-04-16 01:51:30 +08:00
|
|
|
#include "llvm/MC/MCContext.h"
|
|
|
|
#include "llvm/MC/MCSectionELF.h"
|
2012-12-12 05:25:42 +08:00
|
|
|
#include "llvm/MC/MCStreamer.h"
|
2017-10-12 06:41:09 +08:00
|
|
|
#include "llvm/Support/AMDGPUMetadata.h"
|
2013-06-28 23:47:08 +08:00
|
|
|
#include "llvm/Support/MathExtras.h"
|
2012-12-12 05:25:42 +08:00
|
|
|
#include "llvm/Support/TargetRegistry.h"
|
2018-03-24 07:58:19 +08:00
|
|
|
#include "llvm/Target/TargetLoweringObjectFile.h"
|
2012-12-12 05:25:42 +08:00
|
|
|
|
|
|
|
using namespace llvm;
|
2017-10-12 06:41:09 +08:00
|
|
|
using namespace llvm::AMDGPU;
|
2012-12-12 05:25:42 +08:00
|
|
|
|
2014-06-27 01:22:30 +08:00
|
|
|
// TODO: This should get the default rounding mode from the kernel. We just set
|
|
|
|
// the default here, but this could change if the OpenCL rounding mode pragmas
|
|
|
|
// are used.
|
|
|
|
//
|
|
|
|
// The denormal mode here should match what is reported by the OpenCL runtime
|
|
|
|
// for the CL_FP_DENORM bit from CL_DEVICE_{HALF|SINGLE|DOUBLE}_FP_CONFIG, but
|
|
|
|
// can also be override to flush with the -cl-denorms-are-zero compiler flag.
|
|
|
|
//
|
|
|
|
// AMD OpenCL only sets flush none and reports CL_FP_DENORM for double
|
|
|
|
// precision, and leaves single precision to flush all and does not report
|
|
|
|
// CL_FP_DENORM for CL_DEVICE_SINGLE_FP_CONFIG. Mesa's OpenCL currently reports
|
|
|
|
// CL_FP_DENORM for both.
|
2014-07-15 07:40:43 +08:00
|
|
|
//
|
|
|
|
// FIXME: It seems some instructions do not support single precision denormals
|
|
|
|
// regardless of the mode (exp_*_f32, rcp_*_f32, rsq_*_f32, rsq_*f32, sqrt_f32,
|
|
|
|
// and sin_f32, cos_f32 on most parts).
|
|
|
|
|
|
|
|
// We want to use these instructions, and using fp32 denormals also causes
|
|
|
|
// instructions to run at the double precision rate for the device so it's
|
|
|
|
// probably best to just report no single precision denormals.
|
2014-07-15 07:40:49 +08:00
|
|
|
static uint32_t getFPMode(const MachineFunction &F) {
|
2018-07-12 04:59:01 +08:00
|
|
|
const GCNSubtarget& ST = F.getSubtarget<GCNSubtarget>();
|
2014-07-15 07:40:49 +08:00
|
|
|
// TODO: Is there any real use for the flush in only / flush out only modes?
|
|
|
|
|
|
|
|
uint32_t FP32Denormals =
|
|
|
|
ST.hasFP32Denormals() ? FP_DENORM_FLUSH_NONE : FP_DENORM_FLUSH_IN_FLUSH_OUT;
|
|
|
|
|
|
|
|
uint32_t FP64Denormals =
|
|
|
|
ST.hasFP64Denormals() ? FP_DENORM_FLUSH_NONE : FP_DENORM_FLUSH_IN_FLUSH_OUT;
|
|
|
|
|
2014-06-27 01:22:30 +08:00
|
|
|
return FP_ROUND_MODE_SP(FP_ROUND_ROUND_TO_NEAREST) |
|
|
|
|
FP_ROUND_MODE_DP(FP_ROUND_ROUND_TO_NEAREST) |
|
2014-07-15 07:40:49 +08:00
|
|
|
FP_DENORM_MODE_SP(FP32Denormals) |
|
|
|
|
FP_DENORM_MODE_DP(FP64Denormals);
|
2014-06-27 01:22:30 +08:00
|
|
|
}
|
2012-12-12 05:25:42 +08:00
|
|
|
|
2015-01-19 04:29:04 +08:00
|
|
|
/// Factory hooked into the TargetRegistry for the GCN target; builds the
/// AMDGPU asm printer, transferring ownership of the streamer to it.
static AsmPrinter *
createAMDGPUAsmPrinterPass(TargetMachine &tm,
                           std::unique_ptr<MCStreamer> &&Streamer) {
  auto *Printer = new AMDGPUAsmPrinter(tm, std::move(Streamer));
  return Printer;
}
|
|
|
|
|
2015-06-13 11:28:10 +08:00
|
|
|
// Registry entry point: wire up asm printers for both AMDGPU targets.
// R600 (r600 triple) uses the dedicated R600 printer; GCN (amdgcn triple)
// uses AMDGPUAsmPrinter created by the factory above.
extern "C" void LLVMInitializeAMDGPUAsmPrinter() {
  TargetRegistry::RegisterAsmPrinter(getTheAMDGPUTarget(),
                                     llvm::createR600AsmPrinterPass);
  TargetRegistry::RegisterAsmPrinter(getTheGCNTarget(),
                                     createAMDGPUAsmPrinterPass);
}
|
|
|
|
|
2015-01-19 04:29:04 +08:00
|
|
|
// Construct the printer and cache the target's address-space mapping so the
// rest of the printer doesn't need to query the target machine repeatedly.
AMDGPUAsmPrinter::AMDGPUAsmPrinter(TargetMachine &TM,
                                   std::unique_ptr<MCStreamer> Streamer)
  : AsmPrinter(TM, std::move(Streamer)) {
  // TM is always an AMDGPUTargetMachine here; this printer is only registered
  // for AMDGPU targets.
  AMDGPUASI = static_cast<AMDGPUTargetMachine*>(&TM)->getAMDGPUAS();
}
|
2013-10-12 13:02:51 +08:00
|
|
|
|
2016-10-01 10:56:57 +08:00
|
|
|
// Human-readable pass name shown in -debug-pass output.
StringRef AMDGPUAsmPrinter::getPassName() const {
  return "AMDGPU Assembly Printer";
}
|
|
|
|
|
2017-03-23 06:32:22 +08:00
|
|
|
// Convenience accessor for the module-level MC subtarget info (as opposed to
// the per-function subtarget available through MF).
const MCSubtargetInfo* AMDGPUAsmPrinter::getSTI() const {
  return TM.getMCSubtargetInfo();
}
|
|
|
|
|
2017-10-15 06:16:26 +08:00
|
|
|
// Return the AMDGPU-specific target streamer, or null when no output streamer
// exists (callers must handle the null case).
AMDGPUTargetStreamer* AMDGPUAsmPrinter::getTargetStreamer() const {
  return OutStreamer ? static_cast<AMDGPUTargetStreamer *>(
                           OutStreamer->getTargetStreamer())
                     : nullptr;
}
|
|
|
|
|
2016-01-13 01:18:17 +08:00
|
|
|
// Emit module-level preamble directives. Only the amdhsa and amdpal OSes
// carry module metadata; code object v3 on amdhsa emits its metadata through
// a different path and is skipped here.
void AMDGPUAsmPrinter::EmitStartOfAsmFile(Module &M) {
  const Triple::OSType OS = TM.getTargetTriple().getOS();

  if (IsaInfo::hasCodeObjectV3(getSTI()) && OS == Triple::AMDHSA)
    return;

  if (OS != Triple::AMDHSA && OS != Triple::AMDPAL)
    return;

  // Start collecting per-OS metadata for this module.
  if (OS == Triple::AMDHSA)
    HSAMetadataStream.begin(M);
  else
    readPALMetadata(M);

  // HSA emits NT_AMDGPU_HSA_CODE_OBJECT_VERSION for code objects v2.
  if (OS == Triple::AMDHSA)
    getTargetStreamer()->EmitDirectiveHSACodeObjectVersion(2, 1);

  // HSA and PAL emit NT_AMDGPU_HSA_ISA for code objects v2.
  IsaInfo::IsaVersion Version =
      IsaInfo::getIsaVersion(getSTI()->getFeatureBits());
  getTargetStreamer()->EmitDirectiveHSACodeObjectISA(
      Version.Major, Version.Minor, Version.Stepping, "AMD", "AMDGPU");
}
|
2016-05-06 01:03:33 +08:00
|
|
|
|
2017-03-23 06:32:22 +08:00
|
|
|
// Emit module-level epilogue notes: the ISA version note, plus the HSA or PAL
// metadata collected while printing the module.
void AMDGPUAsmPrinter::EmitEndOfAsmFile(Module &M) {
  // TODO: Add metadata to code object v3.
  if (IsaInfo::hasCodeObjectV3(getSTI()) &&
      TM.getTargetTriple().getOS() == Triple::AMDHSA)
    return;

  // Following code requires TargetStreamer to be present.
  if (!getTargetStreamer())
    return;

  // Emit ISA Version (NT_AMD_AMDGPU_ISA).
  std::string VersionText;
  raw_string_ostream VersionStream(VersionText);
  IsaInfo::streamIsaVersion(getSTI(), VersionStream);
  getTargetStreamer()->EmitISAVersion(VersionStream.str());

  // Emit HSA Metadata (NT_AMD_AMDGPU_HSA_METADATA).
  if (TM.getTargetTriple().getOS() == Triple::AMDHSA) {
    HSAMetadataStream.end();
    getTargetStreamer()->EmitHSAMetadata(HSAMetadataStream.getHSAMetadata());
  }

  // Emit PAL Metadata (NT_AMD_AMDGPU_PAL_METADATA).
  if (TM.getTargetTriple().getOS() == Triple::AMDPAL) {
    // Copy the PAL metadata from the map where we collected it into a vector,
    // then write it as a .note.
    PALMD::Metadata PALMetadataVector;
    for (const auto &Entry : PALMetadataMap) {
      PALMetadataVector.push_back(Entry.first);
      PALMetadataVector.push_back(Entry.second);
    }
    getTargetStreamer()->EmitPALMetadata(PALMetadataVector);
  }
}
|
|
|
|
|
2016-10-07 00:20:41 +08:00
|
|
|
bool AMDGPUAsmPrinter::isBlockOnlyReachableByFallthrough(
|
|
|
|
const MachineBasicBlock *MBB) const {
|
|
|
|
if (!AsmPrinter::isBlockOnlyReachableByFallthrough(MBB))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (MBB->empty())
|
|
|
|
return true;
|
|
|
|
|
|
|
|
// If this is a block implementing a long branch, an expression relative to
|
|
|
|
// the start of the block is needed. to the start of the block.
|
|
|
|
// XXX - Is there a smarter way to check this?
|
|
|
|
return (MBB->back().getOpcode() != AMDGPU::S_SETPC_B64);
|
|
|
|
}
|
|
|
|
|
2015-06-27 05:14:58 +08:00
|
|
|
void AMDGPUAsmPrinter::EmitFunctionBodyStart() {
|
2018-06-13 02:02:46 +08:00
|
|
|
const SIMachineFunctionInfo &MFI = *MF->getInfo<SIMachineFunctionInfo>();
|
|
|
|
if (!MFI.isEntryFunction())
|
|
|
|
return;
|
|
|
|
if (IsaInfo::hasCodeObjectV3(getSTI()) &&
|
|
|
|
TM.getTargetTriple().getOS() == Triple::AMDHSA)
|
2017-04-20 03:38:10 +08:00
|
|
|
return;
|
|
|
|
|
2018-07-12 04:59:01 +08:00
|
|
|
const GCNSubtarget &STM = MF->getSubtarget<GCNSubtarget>();
|
2018-07-20 17:05:08 +08:00
|
|
|
const Function &F = MF->getFunction();
|
|
|
|
if (STM.isAmdCodeObjectV2(F) &&
|
|
|
|
(F.getCallingConv() == CallingConv::AMDGPU_KERNEL ||
|
|
|
|
F.getCallingConv() == CallingConv::SPIR_KERNEL)) {
|
|
|
|
amd_kernel_code_t KernelCode;
|
2017-05-03 01:14:00 +08:00
|
|
|
getAmdKernelCode(KernelCode, CurrentProgramInfo, *MF);
|
2017-10-15 06:16:26 +08:00
|
|
|
getTargetStreamer()->EmitAMDKernelCodeT(KernelCode);
|
2015-06-27 05:14:58 +08:00
|
|
|
}
|
2017-03-23 06:32:22 +08:00
|
|
|
|
|
|
|
if (TM.getTargetTriple().getOS() != Triple::AMDHSA)
|
|
|
|
return;
|
2017-10-12 06:59:35 +08:00
|
|
|
|
2018-07-11 01:31:32 +08:00
|
|
|
HSAMetadataStream.emitKernel(*MF, CurrentProgramInfo);
|
2015-06-27 05:14:58 +08:00
|
|
|
}
|
|
|
|
|
2018-06-13 02:02:46 +08:00
|
|
|
// For code object v3 on amdhsa, emit the kernel descriptor for this kernel
// into the read-only data section after the function body has been printed.
void AMDGPUAsmPrinter::EmitFunctionBodyEnd() {
  const SIMachineFunctionInfo &MFI = *MF->getInfo<SIMachineFunctionInfo>();
  // Descriptors are only produced for entry functions (kernels).
  if (!MFI.isEntryFunction())
    return;
  // Pre-v3 / non-HSA paths emit their header in EmitFunctionBodyStart instead.
  if (!IsaInfo::hasCodeObjectV3(getSTI()) ||
      TM.getTargetTriple().getOS() != Triple::AMDHSA)
    return;

  auto &Streamer = getTargetStreamer()->getStreamer();
  auto &Context = Streamer.getContext();
  auto &ObjectFileInfo = *Context.getObjectFileInfo();
  auto &ReadOnlySection = *ObjectFileInfo.getReadOnlySection();

  // Temporarily switch to .rodata; restored by PopSection at the end.
  Streamer.PushSection();
  Streamer.SwitchSection(&ReadOnlySection);

  // CP microcode requires the kernel descriptor to be allocated on 64 byte
  // alignment.
  Streamer.EmitValueToAlignment(64, 0, 1, 0);
  if (ReadOnlySection.getAlignment() < 64)
    ReadOnlySection.setAlignment(64);

  SmallString<128> KernelName;
  getNameWithPrefix(KernelName, &MF->getFunction());
  // The SGPR count passed here excludes the extra SGPRs (VCC, flat scratch,
  // XNACK) that the target streamer accounts for separately.
  getTargetStreamer()->EmitAmdhsaKernelDescriptor(
      *getSTI(), KernelName, getAmdhsaKernelDescriptor(*MF, CurrentProgramInfo),
      CurrentProgramInfo.NumVGPRsForWavesPerEU,
      CurrentProgramInfo.NumSGPRsForWavesPerEU -
          IsaInfo::getNumExtraSGPRs(getSTI()->getFeatureBits(),
                                    CurrentProgramInfo.VCCUsed,
                                    CurrentProgramInfo.FlatUsed),
      CurrentProgramInfo.VCCUsed, CurrentProgramInfo.FlatUsed,
      hasXNACK(*getSTI()));

  Streamer.PopSection();
}
|
|
|
|
|
2015-11-06 19:45:14 +08:00
|
|
|
void AMDGPUAsmPrinter::EmitFunctionEntryLabel() {
|
2018-06-13 02:02:46 +08:00
|
|
|
if (IsaInfo::hasCodeObjectV3(getSTI()) &&
|
|
|
|
TM.getTargetTriple().getOS() == Triple::AMDHSA) {
|
|
|
|
AsmPrinter::EmitFunctionEntryLabel();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2015-11-06 19:45:14 +08:00
|
|
|
const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
|
2018-07-12 04:59:01 +08:00
|
|
|
const GCNSubtarget &STM = MF->getSubtarget<GCNSubtarget>();
|
2018-05-30 01:42:50 +08:00
|
|
|
if (MFI->isEntryFunction() && STM.isAmdCodeObjectV2(MF->getFunction())) {
|
2016-09-27 01:29:25 +08:00
|
|
|
SmallString<128> SymbolName;
|
2017-12-16 06:22:58 +08:00
|
|
|
getNameWithPrefix(SymbolName, &MF->getFunction()),
|
2017-10-15 06:16:26 +08:00
|
|
|
getTargetStreamer()->EmitAMDGPUSymbolType(
|
2017-03-23 06:32:22 +08:00
|
|
|
SymbolName, ELF::STT_AMDGPU_HSA_KERNEL);
|
2015-11-06 19:45:14 +08:00
|
|
|
}
|
2018-07-12 04:59:01 +08:00
|
|
|
const GCNSubtarget &STI = MF->getSubtarget<GCNSubtarget>();
|
2017-12-08 22:09:34 +08:00
|
|
|
if (STI.dumpCode()) {
|
|
|
|
// Disassemble function name label to text.
|
2017-12-16 06:22:58 +08:00
|
|
|
DisasmLines.push_back(MF->getName().str() + ":");
|
2017-12-08 22:09:34 +08:00
|
|
|
DisasmLineMaxLen = std::max(DisasmLineMaxLen, DisasmLines.back().size());
|
|
|
|
HexLines.push_back("");
|
|
|
|
}
|
2015-11-06 19:45:14 +08:00
|
|
|
|
|
|
|
AsmPrinter::EmitFunctionEntryLabel();
|
|
|
|
}
|
|
|
|
|
2017-12-08 22:09:34 +08:00
|
|
|
void AMDGPUAsmPrinter::EmitBasicBlockStart(const MachineBasicBlock &MBB) const {
|
2018-07-12 04:59:01 +08:00
|
|
|
const GCNSubtarget &STI = MBB.getParent()->getSubtarget<GCNSubtarget>();
|
2017-12-08 22:09:34 +08:00
|
|
|
if (STI.dumpCode() && !isBlockOnlyReachableByFallthrough(&MBB)) {
|
|
|
|
// Write a line for the basic block label if it is not only fallthrough.
|
|
|
|
DisasmLines.push_back(
|
|
|
|
(Twine("BB") + Twine(getFunctionNumber())
|
|
|
|
+ "_" + Twine(MBB.getNumber()) + ":").str());
|
|
|
|
DisasmLineMaxLen = std::max(DisasmLineMaxLen, DisasmLines.back().size());
|
|
|
|
HexLines.push_back("");
|
|
|
|
}
|
|
|
|
AsmPrinter::EmitBasicBlockStart(MBB);
|
|
|
|
}
|
|
|
|
|
2015-12-03 01:00:42 +08:00
|
|
|
// Emit a global variable, except for group (LDS) segment variables, which
// have no backing storage in the object file.
void AMDGPUAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
  // Group segment variables aren't emitted in HSA.
  if (AMDGPU::isGroupSegment(GV))
    return;

  AsmPrinter::EmitGlobalVariable(GV);
}
|
|
|
|
|
2017-05-03 01:14:00 +08:00
|
|
|
// Module finalization: drop the per-module call-graph resource cache so a
// subsequent module starts clean, then run the generic finalization.
bool AMDGPUAsmPrinter::doFinalization(Module &M) {
  CallGraphResourceInfo.clear();
  return AsmPrinter::doFinalization(M);
}
|
|
|
|
|
2017-10-04 03:03:52 +08:00
|
|
|
// For the amdpal OS type, read the amdgpu.pal.metadata supplied by the
|
2017-10-12 06:41:09 +08:00
|
|
|
// frontend into our PALMetadataMap, ready for per-function modification. It
|
2017-10-04 03:03:52 +08:00
|
|
|
// is a NamedMD containing an MDTuple containing a number of MDNodes each of
|
|
|
|
// which is an integer value, and each two integer values forms a key=value
|
2017-10-12 06:41:09 +08:00
|
|
|
// pair that we store as PALMetadataMap[key]=value in the map.
|
|
|
|
void AMDGPUAsmPrinter::readPALMetadata(Module &M) {
|
2017-10-04 03:03:52 +08:00
|
|
|
auto NamedMD = M.getNamedMetadata("amdgpu.pal.metadata");
|
|
|
|
if (!NamedMD || !NamedMD->getNumOperands())
|
|
|
|
return;
|
|
|
|
auto Tuple = dyn_cast<MDTuple>(NamedMD->getOperand(0));
|
|
|
|
if (!Tuple)
|
|
|
|
return;
|
|
|
|
for (unsigned I = 0, E = Tuple->getNumOperands() & -2; I != E; I += 2) {
|
|
|
|
auto Key = mdconst::dyn_extract<ConstantInt>(Tuple->getOperand(I));
|
|
|
|
auto Val = mdconst::dyn_extract<ConstantInt>(Tuple->getOperand(I + 1));
|
|
|
|
if (!Key || !Val)
|
|
|
|
continue;
|
2017-10-12 06:41:09 +08:00
|
|
|
PALMetadataMap[Key->getZExtValue()] = Val->getZExtValue();
|
2017-10-04 03:03:52 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-03 01:14:00 +08:00
|
|
|
// Print comments that apply to both callable functions and entry points.
// Emits code size, register counts, scratch usage and the memory-bound hint
// as raw assembly comments on the output streamer.
void AMDGPUAsmPrinter::emitCommonFunctionComments(
    uint32_t NumVGPR,
    uint32_t NumSGPR,
    uint64_t ScratchSize,
    uint64_t CodeSize,
    const AMDGPUMachineFunction *MFI) {
  OutStreamer->emitRawComment(" codeLenInByte = " + Twine(CodeSize), false);
  OutStreamer->emitRawComment(" NumSgprs: " + Twine(NumSGPR), false);
  OutStreamer->emitRawComment(" NumVgprs: " + Twine(NumVGPR), false);
  OutStreamer->emitRawComment(" ScratchSize: " + Twine(ScratchSize), false);
  OutStreamer->emitRawComment(" MemoryBound: " + Twine(MFI->isMemoryBound()),
                              false);
}
|
|
|
|
|
2018-06-13 02:02:46 +08:00
|
|
|
uint16_t AMDGPUAsmPrinter::getAmdhsaKernelCodeProperties(
|
|
|
|
const MachineFunction &MF) const {
|
|
|
|
const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();
|
|
|
|
uint16_t KernelCodeProperties = 0;
|
|
|
|
|
|
|
|
if (MFI.hasPrivateSegmentBuffer()) {
|
|
|
|
KernelCodeProperties |=
|
|
|
|
amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER;
|
|
|
|
}
|
|
|
|
if (MFI.hasDispatchPtr()) {
|
|
|
|
KernelCodeProperties |=
|
|
|
|
amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR;
|
|
|
|
}
|
|
|
|
if (MFI.hasQueuePtr()) {
|
|
|
|
KernelCodeProperties |=
|
|
|
|
amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR;
|
|
|
|
}
|
|
|
|
if (MFI.hasKernargSegmentPtr()) {
|
|
|
|
KernelCodeProperties |=
|
|
|
|
amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR;
|
|
|
|
}
|
|
|
|
if (MFI.hasDispatchID()) {
|
|
|
|
KernelCodeProperties |=
|
|
|
|
amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID;
|
|
|
|
}
|
|
|
|
if (MFI.hasFlatScratchInit()) {
|
|
|
|
KernelCodeProperties |=
|
|
|
|
amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT;
|
|
|
|
}
|
|
|
|
|
|
|
|
return KernelCodeProperties;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Populate a code-object-v3 kernel descriptor from the computed program info.
amdhsa::kernel_descriptor_t AMDGPUAsmPrinter::getAmdhsaKernelDescriptor(
    const MachineFunction &MF,
    const SIProgramInfo &PI) const {
  // memset (rather than value-init) so reserved bytes and padding are zeroed
  // too; the descriptor is emitted verbatim into the object file.
  amdhsa::kernel_descriptor_t KD;
  memset(&KD, 0x0, sizeof(KD));

  // The descriptor fields are 32 bits wide; the program info must fit.
  assert(isUInt<32>(PI.ScratchSize));
  assert(isUInt<32>(PI.ComputePGMRSrc1));
  assert(isUInt<32>(PI.ComputePGMRSrc2));

  KD.group_segment_fixed_size = PI.LDSSize;
  KD.private_segment_fixed_size = PI.ScratchSize;
  KD.compute_pgm_rsrc1 = PI.ComputePGMRSrc1;
  KD.compute_pgm_rsrc2 = PI.ComputePGMRSrc2;
  KD.kernel_code_properties = getAmdhsaKernelCodeProperties(MF);

  return KD;
}
|
|
|
|
|
2012-12-12 05:25:42 +08:00
|
|
|
// Main per-function entry point: compute program/resource info, emit the body
// plus per-OS program info, and optionally verbose comments and disassembly.
bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
  // Reset per-function program info; filled in below for entry functions.
  CurrentProgramInfo = SIProgramInfo();

  const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();

  // The starting address of all shader programs must be 256 bytes aligned.
  // Regular functions just need the basic required instruction alignment.
  MF.setAlignment(MFI->isEntryFunction() ? 8 : 2);

  SetupMachineFunction(MF);

  const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();
  MCContext &Context = getObjFileLowering().getContext();
  // FIXME: This should be an explicit check for Mesa.
  if (!STM.isAmdHsaOS() && !STM.isAmdPalOS()) {
    MCSectionELF *ConfigSection =
        Context.getELFSection(".AMDGPU.config", ELF::SHT_PROGBITS, 0);
    OutStreamer->SwitchSection(ConfigSection);
  }

  if (MFI->isEntryFunction()) {
    getSIProgramInfo(CurrentProgramInfo, MF);
  } else {
    // Non-entry (callable) functions record their resource usage in the
    // call-graph map for later aggregation by callers.
    auto I = CallGraphResourceInfo.insert(
      std::make_pair(&MF.getFunction(), SIFunctionResourceInfo()));
    SIFunctionResourceInfo &Info = I.first->second;
    assert(I.second && "should only be called once per function");
    Info = analyzeResourceUsage(MF);
  }

  // Per-OS program info emission (HSA handles this elsewhere).
  if (STM.isAmdPalOS())
    EmitPALMetadata(MF, CurrentProgramInfo);
  else if (!STM.isAmdHsaOS()) {
    EmitProgramInfoSI(MF, CurrentProgramInfo);
  }

  // Reset the disassembly buffers; repopulated while emitting the body when
  // -dump-code is enabled.
  DisasmLines.clear();
  HexLines.clear();
  DisasmLineMaxLen = 0;

  EmitFunctionBody();

  if (isVerbose()) {
    MCSectionELF *CommentSection =
        Context.getELFSection(".AMDGPU.csdata", ELF::SHT_PROGBITS, 0);
    OutStreamer->SwitchSection(CommentSection);

    if (!MFI->isEntryFunction()) {
      OutStreamer->emitRawComment(" Function info:", false);
      SIFunctionResourceInfo &Info = CallGraphResourceInfo[&MF.getFunction()];
      emitCommonFunctionComments(
        Info.NumVGPR,
        Info.getTotalNumSGPRs(MF.getSubtarget<GCNSubtarget>()),
        Info.PrivateSegmentSize,
        getFunctionCodeSize(MF), MFI);
      return false;
    }

    OutStreamer->emitRawComment(" Kernel info:", false);
    emitCommonFunctionComments(CurrentProgramInfo.NumVGPR,
                               CurrentProgramInfo.NumSGPR,
                               CurrentProgramInfo.ScratchSize,
                               getFunctionCodeSize(MF), MFI);

    OutStreamer->emitRawComment(
      " FloatMode: " + Twine(CurrentProgramInfo.FloatMode), false);
    OutStreamer->emitRawComment(
      " IeeeMode: " + Twine(CurrentProgramInfo.IEEEMode), false);
    OutStreamer->emitRawComment(
      " LDSByteSize: " + Twine(CurrentProgramInfo.LDSSize) +
      " bytes/workgroup (compile time only)", false);

    OutStreamer->emitRawComment(
      " SGPRBlocks: " + Twine(CurrentProgramInfo.SGPRBlocks), false);
    OutStreamer->emitRawComment(
      " VGPRBlocks: " + Twine(CurrentProgramInfo.VGPRBlocks), false);

    OutStreamer->emitRawComment(
      " NumSGPRsForWavesPerEU: " +
      Twine(CurrentProgramInfo.NumSGPRsForWavesPerEU), false);
    OutStreamer->emitRawComment(
      " NumVGPRsForWavesPerEU: " +
      Twine(CurrentProgramInfo.NumVGPRsForWavesPerEU), false);

    OutStreamer->emitRawComment(
      " WaveLimiterHint : " + Twine(MFI->needsWaveLimiter()), false);

    if (MF.getSubtarget<GCNSubtarget>().debuggerEmitPrologue()) {
      OutStreamer->emitRawComment(
        " DebuggerWavefrontPrivateSegmentOffsetSGPR: s" +
        Twine(CurrentProgramInfo.DebuggerWavefrontPrivateSegmentOffsetSGPR), false);
      OutStreamer->emitRawComment(
        " DebuggerPrivateSegmentBufferSGPR: s" +
        Twine(CurrentProgramInfo.DebuggerPrivateSegmentBufferSGPR), false);
    }

    OutStreamer->emitRawComment(
      " COMPUTE_PGM_RSRC2:USER_SGPR: " +
      Twine(G_00B84C_USER_SGPR(CurrentProgramInfo.ComputePGMRSrc2)), false);
    OutStreamer->emitRawComment(
      " COMPUTE_PGM_RSRC2:TRAP_HANDLER: " +
      Twine(G_00B84C_TRAP_HANDLER(CurrentProgramInfo.ComputePGMRSrc2)), false);
    OutStreamer->emitRawComment(
      " COMPUTE_PGM_RSRC2:TGID_X_EN: " +
      Twine(G_00B84C_TGID_X_EN(CurrentProgramInfo.ComputePGMRSrc2)), false);
    OutStreamer->emitRawComment(
      " COMPUTE_PGM_RSRC2:TGID_Y_EN: " +
      Twine(G_00B84C_TGID_Y_EN(CurrentProgramInfo.ComputePGMRSrc2)), false);
    OutStreamer->emitRawComment(
      " COMPUTE_PGM_RSRC2:TGID_Z_EN: " +
      Twine(G_00B84C_TGID_Z_EN(CurrentProgramInfo.ComputePGMRSrc2)), false);
    OutStreamer->emitRawComment(
      " COMPUTE_PGM_RSRC2:TIDIG_COMP_CNT: " +
      Twine(G_00B84C_TIDIG_COMP_CNT(CurrentProgramInfo.ComputePGMRSrc2)),
      false);
  }

  if (STM.dumpCode()) {

    OutStreamer->SwitchSection(
        Context.getELFSection(".AMDGPU.disasm", ELF::SHT_NOTE, 0));

    // Interleave each disassembled line with its hex encoding, padded so the
    // hex column lines up at DisasmLineMaxLen.
    for (size_t i = 0; i < DisasmLines.size(); ++i) {
      std::string Comment = "\n";
      if (!HexLines[i].empty()) {
        Comment = std::string(DisasmLineMaxLen - DisasmLines[i].size(), ' ');
        Comment += " ; " + HexLines[i] + "\n";
      }

      OutStreamer->EmitBytes(StringRef(DisasmLines[i]));
      OutStreamer->EmitBytes(StringRef(Comment));
    }
  }

  return false;
}
|
|
|
|
|
2017-04-18 03:48:30 +08:00
|
|
|
uint64_t AMDGPUAsmPrinter::getFunctionCodeSize(const MachineFunction &MF) const {
|
2018-07-12 04:59:01 +08:00
|
|
|
const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();
|
2016-06-24 14:30:11 +08:00
|
|
|
const SIInstrInfo *TII = STM.getInstrInfo();
|
2012-12-12 05:25:42 +08:00
|
|
|
|
2017-04-18 03:48:30 +08:00
|
|
|
uint64_t CodeSize = 0;
|
|
|
|
|
2014-07-13 11:06:43 +08:00
|
|
|
for (const MachineBasicBlock &MBB : MF) {
|
|
|
|
for (const MachineInstr &MI : MBB) {
|
2014-04-16 06:40:47 +08:00
|
|
|
// TODO: CodeSize should account for multiple functions.
|
2015-08-12 17:04:44 +08:00
|
|
|
|
|
|
|
// TODO: Should we count size of debug info?
|
2018-05-09 10:42:00 +08:00
|
|
|
if (MI.isDebugInstr())
|
2015-08-12 17:04:44 +08:00
|
|
|
continue;
|
|
|
|
|
2017-04-18 03:48:30 +08:00
|
|
|
CodeSize += TII->getInstSizeInBytes(MI);
|
|
|
|
}
|
|
|
|
}
|
2014-04-16 06:40:47 +08:00
|
|
|
|
2017-04-18 03:48:30 +08:00
|
|
|
return CodeSize;
|
|
|
|
}
|
2014-01-09 05:47:14 +08:00
|
|
|
|
2017-04-18 03:48:30 +08:00
|
|
|
static bool hasAnyNonFlatUseOfReg(const MachineRegisterInfo &MRI,
|
|
|
|
const SIInstrInfo &TII,
|
|
|
|
unsigned Reg) {
|
|
|
|
for (const MachineOperand &UseOp : MRI.reg_operands(Reg)) {
|
|
|
|
if (!UseOp.isImplicit() || !TII.isFLAT(*UseOp.getParent()))
|
|
|
|
return true;
|
|
|
|
}
|
2015-10-02 05:51:59 +08:00
|
|
|
|
2017-04-18 03:48:30 +08:00
|
|
|
return false;
|
|
|
|
}
|
2015-10-02 05:51:59 +08:00
|
|
|
|
2017-05-03 01:14:00 +08:00
|
|
|
// Total SGPR footprint: explicitly-used SGPRs plus the extra SGPRs the
// subtarget reserves (e.g. for VCC / flat scratch) given this function's
// usage flags.
int32_t AMDGPUAsmPrinter::SIFunctionResourceInfo::getTotalNumSGPRs(
  const GCNSubtarget &ST) const {
  return NumExplicitSGPR + IsaInfo::getNumExtraSGPRs(ST.getFeatureBits(),
                                                     UsesVCC, UsesFlatScratch);
}
|
2013-12-05 13:15:35 +08:00
|
|
|
|
2017-05-03 01:14:00 +08:00
|
|
|
AMDGPUAsmPrinter::SIFunctionResourceInfo AMDGPUAsmPrinter::analyzeResourceUsage(
|
|
|
|
const MachineFunction &MF) const {
|
|
|
|
SIFunctionResourceInfo Info;
|
2016-01-05 07:35:53 +08:00
|
|
|
|
2017-05-03 01:14:00 +08:00
|
|
|
const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
|
2018-07-12 04:59:01 +08:00
|
|
|
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
|
2017-05-03 01:14:00 +08:00
|
|
|
const MachineFrameInfo &FrameInfo = MF.getFrameInfo();
|
|
|
|
const MachineRegisterInfo &MRI = MF.getRegInfo();
|
|
|
|
const SIInstrInfo *TII = ST.getInstrInfo();
|
|
|
|
const SIRegisterInfo &TRI = TII->getRegisterInfo();
|
|
|
|
|
|
|
|
Info.UsesFlatScratch = MRI.isPhysRegUsed(AMDGPU::FLAT_SCR_LO) ||
|
|
|
|
MRI.isPhysRegUsed(AMDGPU::FLAT_SCR_HI);
|
2017-04-18 03:48:30 +08:00
|
|
|
|
|
|
|
// Even if FLAT_SCRATCH is implicitly used, it has no effect if flat
|
2017-05-03 01:14:00 +08:00
|
|
|
// instructions aren't used to access the scratch buffer. Inline assembly may
|
|
|
|
// need it though.
|
2017-04-18 03:48:30 +08:00
|
|
|
//
|
|
|
|
// If we only have implicit uses of flat_scr on flat instructions, it is not
|
|
|
|
// really needed.
|
2017-05-03 01:14:00 +08:00
|
|
|
if (Info.UsesFlatScratch && !MFI->hasFlatScratchInit() &&
|
2017-04-18 03:48:30 +08:00
|
|
|
(!hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR) &&
|
|
|
|
!hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR_LO) &&
|
|
|
|
!hasAnyNonFlatUseOfReg(MRI, *TII, AMDGPU::FLAT_SCR_HI))) {
|
2017-05-03 01:14:00 +08:00
|
|
|
Info.UsesFlatScratch = false;
|
2017-04-18 03:48:30 +08:00
|
|
|
}
|
|
|
|
|
2017-05-03 01:14:00 +08:00
|
|
|
Info.HasDynamicallySizedStack = FrameInfo.hasVarSizedObjects();
|
|
|
|
Info.PrivateSegmentSize = FrameInfo.getStackSize();
|
2018-03-30 05:30:06 +08:00
|
|
|
if (MFI->isStackRealigned())
|
|
|
|
Info.PrivateSegmentSize += FrameInfo.getMaxAlignment();
|
AMDGPU/SI: xnack_mask is always reserved on VI
Summary:
Somehow, I first interpreted the docs as saying space for xnack_mask is only
reserved when XNACK is enabled via SH_MEM_CONFIG. I felt uneasy about this and
went back to actually test what is happening, and it turns out that xnack_mask
is always reserved at least on Tonga and Carrizo, in the sense that flat_scr
is always fixed below the SGPRs that are used to implement xnack_mask, whether
or not they are actually used.
I confirmed this by writing a shader using inline assembly to tease out the
aliasing between flat_scratch and regular SGPRs. For example, on Tonga, where
we fix the number of SGPRs to 80, s[74:75] aliases flat_scratch (so
xnack_mask is s[76:77] and vcc is s[78:79]).
This patch changes both the calculation of the total number of SGPRs and the
various register reservations to account for this.
It ought to be possible to use the gap left by xnack_mask when the feature
isn't used, but this patch doesn't try to do that. (Note that the same applies
to vcc.)
Note that previously, even before my earlier change in r256794, the SGPRs that
alias to xnack_mask could end up being used as well when flat_scr was unused
and the total number of SGPRs happened to fall on the right alignment
(e.g. highest regular SGPR being used s29 and VCC used would lead to number
of SGPRs being 32, where s28 and s29 alias with xnack_mask). So if there
were some conflict due to such aliasing, we should have noticed that already.
Reviewers: arsenm, tstellarAMD
Subscribers: arsenm, llvm-commits
Differential Revision: http://reviews.llvm.org/D15898
llvm-svn: 257073
2016-01-08 01:10:20 +08:00
|
|
|
|
2017-05-03 01:14:00 +08:00
|
|
|
|
2017-06-01 23:05:15 +08:00
|
|
|
Info.UsesVCC = MRI.isPhysRegUsed(AMDGPU::VCC_LO) ||
|
|
|
|
MRI.isPhysRegUsed(AMDGPU::VCC_HI);
|
2017-05-03 01:14:00 +08:00
|
|
|
|
2017-06-01 23:05:15 +08:00
|
|
|
// If there are no calls, MachineRegisterInfo can tell us the used register
|
|
|
|
// count easily.
|
2017-09-06 02:36:36 +08:00
|
|
|
// A tail call isn't considered a call for MachineFrameInfo's purposes.
|
|
|
|
if (!FrameInfo.hasCalls() && !FrameInfo.hasTailCall()) {
|
2017-08-03 01:15:01 +08:00
|
|
|
MCPhysReg HighestVGPRReg = AMDGPU::NoRegister;
|
|
|
|
for (MCPhysReg Reg : reverse(AMDGPU::VGPR_32RegClass.getRegisters())) {
|
|
|
|
if (MRI.isPhysRegUsed(Reg)) {
|
|
|
|
HighestVGPRReg = Reg;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2017-05-03 01:14:00 +08:00
|
|
|
|
2017-08-03 01:15:01 +08:00
|
|
|
MCPhysReg HighestSGPRReg = AMDGPU::NoRegister;
|
|
|
|
for (MCPhysReg Reg : reverse(AMDGPU::SGPR_32RegClass.getRegisters())) {
|
|
|
|
if (MRI.isPhysRegUsed(Reg)) {
|
|
|
|
HighestSGPRReg = Reg;
|
|
|
|
break;
|
|
|
|
}
|
2017-06-01 23:05:15 +08:00
|
|
|
}
|
2017-08-03 01:15:01 +08:00
|
|
|
|
|
|
|
// We found the maximum register index. They start at 0, so add one to get the
|
|
|
|
// number of registers.
|
|
|
|
Info.NumVGPR = HighestVGPRReg == AMDGPU::NoRegister ? 0 :
|
|
|
|
TRI.getHWRegIndex(HighestVGPRReg) + 1;
|
|
|
|
Info.NumExplicitSGPR = HighestSGPRReg == AMDGPU::NoRegister ? 0 :
|
|
|
|
TRI.getHWRegIndex(HighestSGPRReg) + 1;
|
|
|
|
|
|
|
|
return Info;
|
2015-12-18 01:05:09 +08:00
|
|
|
}
|
2014-09-15 23:41:53 +08:00
|
|
|
|
2017-08-02 09:31:28 +08:00
|
|
|
int32_t MaxVGPR = -1;
|
|
|
|
int32_t MaxSGPR = -1;
|
2017-11-15 04:33:14 +08:00
|
|
|
uint64_t CalleeFrameSize = 0;
|
2017-08-02 09:31:28 +08:00
|
|
|
|
|
|
|
for (const MachineBasicBlock &MBB : MF) {
|
|
|
|
for (const MachineInstr &MI : MBB) {
|
|
|
|
// TODO: Check regmasks? Do they occur anywhere except calls?
|
|
|
|
for (const MachineOperand &MO : MI.operands()) {
|
|
|
|
unsigned Width = 0;
|
|
|
|
bool IsSGPR = false;
|
|
|
|
|
|
|
|
if (!MO.isReg())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
unsigned Reg = MO.getReg();
|
|
|
|
switch (Reg) {
|
|
|
|
case AMDGPU::EXEC:
|
|
|
|
case AMDGPU::EXEC_LO:
|
|
|
|
case AMDGPU::EXEC_HI:
|
|
|
|
case AMDGPU::SCC:
|
|
|
|
case AMDGPU::M0:
|
|
|
|
case AMDGPU::SRC_SHARED_BASE:
|
|
|
|
case AMDGPU::SRC_SHARED_LIMIT:
|
|
|
|
case AMDGPU::SRC_PRIVATE_BASE:
|
|
|
|
case AMDGPU::SRC_PRIVATE_LIMIT:
|
|
|
|
continue;
|
|
|
|
|
|
|
|
case AMDGPU::NoRegister:
|
2018-05-09 10:42:00 +08:00
|
|
|
assert(MI.isDebugInstr());
|
2017-08-02 09:31:28 +08:00
|
|
|
continue;
|
|
|
|
|
|
|
|
case AMDGPU::VCC:
|
|
|
|
case AMDGPU::VCC_LO:
|
|
|
|
case AMDGPU::VCC_HI:
|
|
|
|
Info.UsesVCC = true;
|
|
|
|
continue;
|
|
|
|
|
|
|
|
case AMDGPU::FLAT_SCR:
|
|
|
|
case AMDGPU::FLAT_SCR_LO:
|
|
|
|
case AMDGPU::FLAT_SCR_HI:
|
|
|
|
continue;
|
|
|
|
|
2018-01-10 22:22:19 +08:00
|
|
|
case AMDGPU::XNACK_MASK:
|
|
|
|
case AMDGPU::XNACK_MASK_LO:
|
|
|
|
case AMDGPU::XNACK_MASK_HI:
|
|
|
|
llvm_unreachable("xnack_mask registers should not be used");
|
|
|
|
|
2017-08-02 09:31:28 +08:00
|
|
|
case AMDGPU::TBA:
|
|
|
|
case AMDGPU::TBA_LO:
|
|
|
|
case AMDGPU::TBA_HI:
|
|
|
|
case AMDGPU::TMA:
|
|
|
|
case AMDGPU::TMA_LO:
|
|
|
|
case AMDGPU::TMA_HI:
|
|
|
|
llvm_unreachable("trap handler registers should not be used");
|
|
|
|
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (AMDGPU::SReg_32RegClass.contains(Reg)) {
|
|
|
|
assert(!AMDGPU::TTMP_32RegClass.contains(Reg) &&
|
|
|
|
"trap handler registers should not be used");
|
|
|
|
IsSGPR = true;
|
|
|
|
Width = 1;
|
|
|
|
} else if (AMDGPU::VGPR_32RegClass.contains(Reg)) {
|
|
|
|
IsSGPR = false;
|
|
|
|
Width = 1;
|
|
|
|
} else if (AMDGPU::SReg_64RegClass.contains(Reg)) {
|
|
|
|
assert(!AMDGPU::TTMP_64RegClass.contains(Reg) &&
|
|
|
|
"trap handler registers should not be used");
|
|
|
|
IsSGPR = true;
|
|
|
|
Width = 2;
|
|
|
|
} else if (AMDGPU::VReg_64RegClass.contains(Reg)) {
|
|
|
|
IsSGPR = false;
|
|
|
|
Width = 2;
|
|
|
|
} else if (AMDGPU::VReg_96RegClass.contains(Reg)) {
|
|
|
|
IsSGPR = false;
|
|
|
|
Width = 3;
|
|
|
|
} else if (AMDGPU::SReg_128RegClass.contains(Reg)) {
|
2017-12-22 23:18:06 +08:00
|
|
|
assert(!AMDGPU::TTMP_128RegClass.contains(Reg) &&
|
|
|
|
"trap handler registers should not be used");
|
2017-08-02 09:31:28 +08:00
|
|
|
IsSGPR = true;
|
|
|
|
Width = 4;
|
|
|
|
} else if (AMDGPU::VReg_128RegClass.contains(Reg)) {
|
|
|
|
IsSGPR = false;
|
|
|
|
Width = 4;
|
|
|
|
} else if (AMDGPU::SReg_256RegClass.contains(Reg)) {
|
2017-12-22 23:18:06 +08:00
|
|
|
assert(!AMDGPU::TTMP_256RegClass.contains(Reg) &&
|
|
|
|
"trap handler registers should not be used");
|
2017-08-02 09:31:28 +08:00
|
|
|
IsSGPR = true;
|
|
|
|
Width = 8;
|
|
|
|
} else if (AMDGPU::VReg_256RegClass.contains(Reg)) {
|
|
|
|
IsSGPR = false;
|
|
|
|
Width = 8;
|
|
|
|
} else if (AMDGPU::SReg_512RegClass.contains(Reg)) {
|
2017-12-22 23:18:06 +08:00
|
|
|
assert(!AMDGPU::TTMP_512RegClass.contains(Reg) &&
|
|
|
|
"trap handler registers should not be used");
|
2017-08-02 09:31:28 +08:00
|
|
|
IsSGPR = true;
|
|
|
|
Width = 16;
|
|
|
|
} else if (AMDGPU::VReg_512RegClass.contains(Reg)) {
|
|
|
|
IsSGPR = false;
|
|
|
|
Width = 16;
|
|
|
|
} else {
|
|
|
|
llvm_unreachable("Unknown register class");
|
|
|
|
}
|
|
|
|
unsigned HWReg = TRI.getHWRegIndex(Reg);
|
|
|
|
int MaxUsed = HWReg + Width - 1;
|
|
|
|
if (IsSGPR) {
|
|
|
|
MaxSGPR = MaxUsed > MaxSGPR ? MaxUsed : MaxSGPR;
|
|
|
|
} else {
|
|
|
|
MaxVGPR = MaxUsed > MaxVGPR ? MaxUsed : MaxVGPR;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (MI.isCall()) {
|
|
|
|
// Pseudo used just to encode the underlying global. Is there a better
|
|
|
|
// way to track this?
|
2017-08-12 04:42:08 +08:00
|
|
|
|
|
|
|
const MachineOperand *CalleeOp
|
|
|
|
= TII->getNamedOperand(MI, AMDGPU::OpName::callee);
|
|
|
|
const Function *Callee = cast<Function>(CalleeOp->getGlobal());
|
2017-08-02 09:31:28 +08:00
|
|
|
if (Callee->isDeclaration()) {
|
|
|
|
// If this is a call to an external function, we can't do much. Make
|
|
|
|
// conservative guesses.
|
|
|
|
|
|
|
|
// 48 SGPRs - vcc, - flat_scr, -xnack
|
2018-06-22 03:38:56 +08:00
|
|
|
int MaxSGPRGuess =
|
|
|
|
47 - IsaInfo::getNumExtraSGPRs(ST.getFeatureBits(), true,
|
|
|
|
ST.hasFlatAddressSpace());
|
2017-08-02 09:31:28 +08:00
|
|
|
MaxSGPR = std::max(MaxSGPR, MaxSGPRGuess);
|
|
|
|
MaxVGPR = std::max(MaxVGPR, 23);
|
|
|
|
|
2017-11-15 04:33:14 +08:00
|
|
|
CalleeFrameSize = std::max(CalleeFrameSize, UINT64_C(16384));
|
2017-08-02 09:31:28 +08:00
|
|
|
Info.UsesVCC = true;
|
|
|
|
Info.UsesFlatScratch = ST.hasFlatAddressSpace();
|
|
|
|
Info.HasDynamicallySizedStack = true;
|
|
|
|
} else {
|
|
|
|
// We force CodeGen to run in SCC order, so the callee's register
|
|
|
|
// usage etc. should be the cumulative usage of all callees.
|
|
|
|
auto I = CallGraphResourceInfo.find(Callee);
|
|
|
|
assert(I != CallGraphResourceInfo.end() &&
|
|
|
|
"callee should have been handled before caller");
|
|
|
|
|
|
|
|
MaxSGPR = std::max(I->second.NumExplicitSGPR - 1, MaxSGPR);
|
|
|
|
MaxVGPR = std::max(I->second.NumVGPR - 1, MaxVGPR);
|
|
|
|
CalleeFrameSize
|
|
|
|
= std::max(I->second.PrivateSegmentSize, CalleeFrameSize);
|
|
|
|
Info.UsesVCC |= I->second.UsesVCC;
|
|
|
|
Info.UsesFlatScratch |= I->second.UsesFlatScratch;
|
|
|
|
Info.HasDynamicallySizedStack |= I->second.HasDynamicallySizedStack;
|
|
|
|
Info.HasRecursion |= I->second.HasRecursion;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!Callee->doesNotRecurse())
|
|
|
|
Info.HasRecursion = true;
|
|
|
|
}
|
2017-06-01 23:05:15 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-02 09:31:28 +08:00
|
|
|
Info.NumExplicitSGPR = MaxSGPR + 1;
|
|
|
|
Info.NumVGPR = MaxVGPR + 1;
|
|
|
|
Info.PrivateSegmentSize += CalleeFrameSize;
|
2017-06-01 23:05:15 +08:00
|
|
|
|
|
|
|
return Info;
|
2017-05-03 01:14:00 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Fill \p ProgInfo with the program resource information (register counts,
/// scratch/LDS sizes, and the COMPUTE_PGM_RSRC1/RSRC2 encodings) for \p MF,
/// starting from the per-function resource analysis and then applying
/// hardware limits, wave-dispatch requirements, and waves-per-EU requests.
/// Emits DS_Error diagnostics when a resource limit is exceeded.
void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
                                        const MachineFunction &MF) {
  SIFunctionResourceInfo Info = analyzeResourceUsage(MF);

  ProgInfo.NumVGPR = Info.NumVGPR;
  ProgInfo.NumSGPR = Info.NumExplicitSGPR;
  ProgInfo.ScratchSize = Info.PrivateSegmentSize;
  ProgInfo.VCCUsed = Info.UsesVCC;
  ProgInfo.FlatUsed = Info.UsesFlatScratch;
  ProgInfo.DynamicCallStack = Info.HasDynamicallySizedStack || Info.HasRecursion;

  // The per-thread scratch size must fit in 32 bits; diagnose instead of
  // silently truncating.
  if (!isUInt<32>(ProgInfo.ScratchSize)) {
    DiagnosticInfoStackSize DiagStackSize(MF.getFunction(),
                                          ProgInfo.ScratchSize, DS_Error);
    MF.getFunction().getContext().diagnose(DiagStackSize);
  }

  const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const SIInstrInfo *TII = STM.getInstrInfo();
  const SIRegisterInfo *RI = &TII->getRegisterInfo();

  // TODO(scott.linder): The calculations related to SGPR/VGPR blocks are
  // duplicated in part in AMDGPUAsmParser::calculateGPRBlocks, and could be
  // unified.
  unsigned ExtraSGPRs = IsaInfo::getNumExtraSGPRs(
      STM.getFeatureBits(), ProgInfo.VCCUsed, ProgInfo.FlatUsed);

  // Check the addressable register limit before we add ExtraSGPRs.
  if (STM.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS &&
      !STM.hasSGPRInitBug()) {
    unsigned MaxAddressableNumSGPRs = STM.getAddressableNumSGPRs();
    if (ProgInfo.NumSGPR > MaxAddressableNumSGPRs) {
      // This can happen due to a compiler bug or when using inline asm.
      LLVMContext &Ctx = MF.getFunction().getContext();
      DiagnosticInfoResourceLimit Diag(MF.getFunction(),
                                       "addressable scalar registers",
                                       ProgInfo.NumSGPR, DS_Error,
                                       DK_ResourceLimit,
                                       MaxAddressableNumSGPRs);
      Ctx.diagnose(Diag);
      // Clamp so the ExtraSGPRs added below keep us within the addressable
      // range.
      ProgInfo.NumSGPR = MaxAddressableNumSGPRs - 1;
    }
  }

  // Account for extra SGPRs and VGPRs reserved for debugger use.
  ProgInfo.NumSGPR += ExtraSGPRs;

  // Ensure there are enough SGPRs and VGPRs for wave dispatch, where wave
  // dispatch registers are function args.
  unsigned WaveDispatchNumSGPR = 0, WaveDispatchNumVGPR = 0;
  for (auto &Arg : MF.getFunction().args()) {
    // Round each argument up to a whole number of 32-bit registers.
    unsigned NumRegs = (Arg.getType()->getPrimitiveSizeInBits() + 31) / 32;
    if (Arg.hasAttribute(Attribute::InReg))
      WaveDispatchNumSGPR += NumRegs;
    else
      WaveDispatchNumVGPR += NumRegs;
  }
  ProgInfo.NumSGPR = std::max(ProgInfo.NumSGPR, WaveDispatchNumSGPR);
  ProgInfo.NumVGPR = std::max(ProgInfo.NumVGPR, WaveDispatchNumVGPR);

  // Adjust number of registers used to meet default/requested minimum/maximum
  // number of waves per execution unit request.
  ProgInfo.NumSGPRsForWavesPerEU = std::max(
    std::max(ProgInfo.NumSGPR, 1u), STM.getMinNumSGPRs(MFI->getMaxWavesPerEU()));
  ProgInfo.NumVGPRsForWavesPerEU = std::max(
    std::max(ProgInfo.NumVGPR, 1u), STM.getMinNumVGPRs(MFI->getMaxWavesPerEU()));

  // On older generations (and with the SGPR init bug) the limit is checked
  // after ExtraSGPRs have been added.
  if (STM.getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS ||
      STM.hasSGPRInitBug()) {
    unsigned MaxAddressableNumSGPRs = STM.getAddressableNumSGPRs();
    if (ProgInfo.NumSGPR > MaxAddressableNumSGPRs) {
      // This can happen due to a compiler bug or when using inline asm to use
      // the registers which are usually reserved for vcc etc.
      LLVMContext &Ctx = MF.getFunction().getContext();
      DiagnosticInfoResourceLimit Diag(MF.getFunction(),
                                       "scalar registers",
                                       ProgInfo.NumSGPR, DS_Error,
                                       DK_ResourceLimit,
                                       MaxAddressableNumSGPRs);
      Ctx.diagnose(Diag);
      ProgInfo.NumSGPR = MaxAddressableNumSGPRs;
      ProgInfo.NumSGPRsForWavesPerEU = MaxAddressableNumSGPRs;
    }
  }

  // Hardware with the SGPR init bug always initializes a fixed SGPR count.
  if (STM.hasSGPRInitBug()) {
    ProgInfo.NumSGPR =
        AMDGPU::IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG;
    ProgInfo.NumSGPRsForWavesPerEU =
        AMDGPU::IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG;
  }

  if (MFI->getNumUserSGPRs() > STM.getMaxNumUserSGPRs()) {
    LLVMContext &Ctx = MF.getFunction().getContext();
    DiagnosticInfoResourceLimit Diag(MF.getFunction(), "user SGPRs",
                                     MFI->getNumUserSGPRs(), DS_Error);
    Ctx.diagnose(Diag);
  }

  if (MFI->getLDSSize() > static_cast<unsigned>(STM.getLocalMemorySize())) {
    LLVMContext &Ctx = MF.getFunction().getContext();
    DiagnosticInfoResourceLimit Diag(MF.getFunction(), "local memory",
                                     MFI->getLDSSize(), DS_Error);
    Ctx.diagnose(Diag);
  }

  ProgInfo.SGPRBlocks = IsaInfo::getNumSGPRBlocks(
      STM.getFeatureBits(), ProgInfo.NumSGPRsForWavesPerEU);
  ProgInfo.VGPRBlocks = IsaInfo::getNumVGPRBlocks(
      STM.getFeatureBits(), ProgInfo.NumVGPRsForWavesPerEU);

  // Update DebuggerWavefrontPrivateSegmentOffsetSGPR and
  // DebuggerPrivateSegmentBufferSGPR fields if "amdgpu-debugger-emit-prologue"
  // attribute was requested.
  if (STM.debuggerEmitPrologue()) {
    ProgInfo.DebuggerWavefrontPrivateSegmentOffsetSGPR =
      RI->getHWRegIndex(MFI->getScratchWaveOffsetReg());
    ProgInfo.DebuggerPrivateSegmentBufferSGPR =
      RI->getHWRegIndex(MFI->getScratchRSrcReg());
  }

  // Set the value to initialize FP_ROUND and FP_DENORM parts of the mode
  // register.
  ProgInfo.FloatMode = getFPMode(MF);

  ProgInfo.IEEEMode = STM.enableIEEEBit(MF);

  // Make clamp modifier on NaN input returns 0.
  ProgInfo.DX10Clamp = STM.enableDX10Clamp();

  unsigned LDSAlignShift;
  if (STM.getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
    // LDS is allocated in 64 dword blocks.
    LDSAlignShift = 8;
  } else {
    // LDS is allocated in 128 dword blocks.
    LDSAlignShift = 9;
  }

  unsigned LDSSpillSize =
    MFI->getLDSWaveSpillSize() * MFI->getMaxFlatWorkGroupSize();

  ProgInfo.LDSSize = MFI->getLDSSize() + LDSSpillSize;
  ProgInfo.LDSBlocks =
      alignTo(ProgInfo.LDSSize, 1ULL << LDSAlignShift) >> LDSAlignShift;

  // Scratch is allocated in 256 dword blocks.
  unsigned ScratchAlignShift = 10;
  // We need to program the hardware with the amount of scratch memory that
  // is used by the entire wave. ProgInfo.ScratchSize is the amount of
  // scratch memory used per thread.
  ProgInfo.ScratchBlocks =
      alignTo(ProgInfo.ScratchSize * STM.getWavefrontSize(),
              1ULL << ScratchAlignShift) >>
      ScratchAlignShift;

  ProgInfo.ComputePGMRSrc1 =
      S_00B848_VGPRS(ProgInfo.VGPRBlocks) |
      S_00B848_SGPRS(ProgInfo.SGPRBlocks) |
      S_00B848_PRIORITY(ProgInfo.Priority) |
      S_00B848_FLOAT_MODE(ProgInfo.FloatMode) |
      S_00B848_PRIV(ProgInfo.Priv) |
      S_00B848_DX10_CLAMP(ProgInfo.DX10Clamp) |
      S_00B848_DEBUG_MODE(ProgInfo.DebugMode) |
      S_00B848_IEEE_MODE(ProgInfo.IEEEMode);

  // 0 = X, 1 = XY, 2 = XYZ
  unsigned TIDIGCompCnt = 0;
  if (MFI->hasWorkItemIDZ())
    TIDIGCompCnt = 2;
  else if (MFI->hasWorkItemIDY())
    TIDIGCompCnt = 1;

  ProgInfo.ComputePGMRSrc2 =
      S_00B84C_SCRATCH_EN(ProgInfo.ScratchBlocks > 0) |
      S_00B84C_USER_SGPR(MFI->getNumUserSGPRs()) |
      // For AMDHSA, TRAP_HANDLER must be zero, as it is populated by the CP.
      S_00B84C_TRAP_HANDLER(STM.isAmdHsaOS() ? 0 : STM.isTrapHandlerEnabled()) |
      S_00B84C_TGID_X_EN(MFI->hasWorkGroupIDX()) |
      S_00B84C_TGID_Y_EN(MFI->hasWorkGroupIDY()) |
      S_00B84C_TGID_Z_EN(MFI->hasWorkGroupIDZ()) |
      S_00B84C_TG_SIZE_EN(MFI->hasWorkGroupInfo()) |
      S_00B84C_TIDIG_COMP_CNT(TIDIGCompCnt) |
      S_00B84C_EXCP_EN_MSB(0) |
      // For AMDHSA, LDS_SIZE must be zero, as it is populated by the CP.
      S_00B84C_LDS_SIZE(STM.isAmdHsaOS() ? 0 : ProgInfo.LDSBlocks) |
      S_00B84C_EXCP_EN(0);
}
|
|
|
|
|
2016-04-07 03:40:20 +08:00
|
|
|
// Return the SPI_SHADER_PGM_RSRC1 (or COMPUTE_PGM_RSRC1) register address
// corresponding to the hardware shader stage selected by \p CallConv.
// Unrecognized conventions are treated as compute.
static unsigned getRsrcReg(CallingConv::ID CallConv) {
  switch (CallConv) {
  case CallingConv::AMDGPU_PS: return R_00B028_SPI_SHADER_PGM_RSRC1_PS;
  case CallingConv::AMDGPU_VS: return R_00B128_SPI_SHADER_PGM_RSRC1_VS;
  case CallingConv::AMDGPU_GS: return R_00B228_SPI_SHADER_PGM_RSRC1_GS;
  case CallingConv::AMDGPU_ES: return R_00B328_SPI_SHADER_PGM_RSRC1_ES;
  case CallingConv::AMDGPU_HS: return R_00B428_SPI_SHADER_PGM_RSRC1_HS;
  case CallingConv::AMDGPU_LS: return R_00B528_SPI_SHADER_PGM_RSRC1_LS;
  case CallingConv::AMDGPU_CS:
  default:
    return R_00B848_COMPUTE_PGM_RSRC1;
  }
}
|
|
|
|
|
|
|
|
/// Emit the non-HSA (Mesa-style .AMDGPU.config) program info for \p MF as a
/// sequence of (register address, value) 32-bit pairs on the output streamer.
/// The emission order here is the section's wire format, so it must not be
/// reordered.
void AMDGPUAsmPrinter::EmitProgramInfoSI(const MachineFunction &MF,
                                         const SIProgramInfo &CurrentProgramInfo) {
  const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  unsigned RsrcReg = getRsrcReg(MF.getFunction().getCallingConv());

  if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
    // Compute shaders: RSRC1, RSRC2 and the scratch (TMPRING) size.
    OutStreamer->EmitIntValue(R_00B848_COMPUTE_PGM_RSRC1, 4);

    OutStreamer->EmitIntValue(CurrentProgramInfo.ComputePGMRSrc1, 4);

    OutStreamer->EmitIntValue(R_00B84C_COMPUTE_PGM_RSRC2, 4);
    OutStreamer->EmitIntValue(CurrentProgramInfo.ComputePGMRSrc2, 4);

    OutStreamer->EmitIntValue(R_00B860_COMPUTE_TMPRING_SIZE, 4);
    OutStreamer->EmitIntValue(S_00B860_WAVESIZE(CurrentProgramInfo.ScratchBlocks), 4);

    // TODO: Should probably note flat usage somewhere. SC emits a "FlatPtr32 =
    // 0" comment but I don't see a corresponding field in the register spec.
  } else {
    // Graphics shaders: stage-specific RSRC1 with packed GPR block counts.
    OutStreamer->EmitIntValue(RsrcReg, 4);
    OutStreamer->EmitIntValue(S_00B028_VGPRS(CurrentProgramInfo.VGPRBlocks) |
                              S_00B028_SGPRS(CurrentProgramInfo.SGPRBlocks), 4);
    if (STM.isVGPRSpillingEnabled(MF.getFunction())) {
      OutStreamer->EmitIntValue(R_0286E8_SPI_TMPRING_SIZE, 4);
      OutStreamer->EmitIntValue(S_0286E8_WAVESIZE(CurrentProgramInfo.ScratchBlocks), 4);
    }
  }

  // Pixel shaders additionally report extra LDS size and their input
  // enable/address masks.
  if (MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS) {
    OutStreamer->EmitIntValue(R_00B02C_SPI_SHADER_PGM_RSRC2_PS, 4);
    OutStreamer->EmitIntValue(S_00B02C_EXTRA_LDS_SIZE(CurrentProgramInfo.LDSBlocks), 4);
    OutStreamer->EmitIntValue(R_0286CC_SPI_PS_INPUT_ENA, 4);
    OutStreamer->EmitIntValue(MFI->getPSInputEnable(), 4);
    OutStreamer->EmitIntValue(R_0286D0_SPI_PS_INPUT_ADDR, 4);
    OutStreamer->EmitIntValue(MFI->getPSInputAddr(), 4);
  }

  // Spill counts use pseudo "register" keys rather than hardware registers.
  OutStreamer->EmitIntValue(R_SPILLED_SGPRS, 4);
  OutStreamer->EmitIntValue(MFI->getNumSpilledSGPRs(), 4);
  OutStreamer->EmitIntValue(R_SPILLED_VGPRS, 4);
  OutStreamer->EmitIntValue(MFI->getNumSpilledVGPRs(), 4);
}
|
2014-12-03 06:00:07 +08:00
|
|
|
|
2017-10-04 03:03:52 +08:00
|
|
|
// This is the equivalent of EmitProgramInfoSI above, but for when the OS type
// is AMDPAL. It stores each compute/SPI register setting and other PAL
// metadata items into the PALMetadataMap, combining with any provided by the
// frontend as LLVM metadata. Once all functions are written, PALMetadataMap is
// then written as a single block in the .note section.
//
// Note: entries are combined with |= via std::map::operator[], which
// value-initializes a missing key to 0, so OR-ing into a fresh entry just
// stores the value.
void AMDGPUAsmPrinter::EmitPALMetadata(const MachineFunction &MF,
       const SIProgramInfo &CurrentProgramInfo) {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  // Given the calling convention, calculate the register number for rsrc1. In
  // principle the register number could change in future hardware, but we know
  // it is the same for gfx6-9 (except that LS and ES don't exist on gfx9), so
  // we can use the same fixed value that .AMDGPU.config has for Mesa. Note
  // that we use a register number rather than a byte offset, so we need to
  // divide by 4.
  unsigned Rsrc1Reg = getRsrcReg(MF.getFunction().getCallingConv()) / 4;
  unsigned Rsrc2Reg = Rsrc1Reg + 1;
  // Also calculate the PAL metadata key for *S_SCRATCH_SIZE. It can be used
  // with a constant offset to access any non-register shader-specific PAL
  // metadata key.
  unsigned ScratchSizeKey = PALMD::Key::CS_SCRATCH_SIZE;
  switch (MF.getFunction().getCallingConv()) {
  case CallingConv::AMDGPU_PS:
    ScratchSizeKey = PALMD::Key::PS_SCRATCH_SIZE;
    break;
  case CallingConv::AMDGPU_VS:
    ScratchSizeKey = PALMD::Key::VS_SCRATCH_SIZE;
    break;
  case CallingConv::AMDGPU_GS:
    ScratchSizeKey = PALMD::Key::GS_SCRATCH_SIZE;
    break;
  case CallingConv::AMDGPU_ES:
    ScratchSizeKey = PALMD::Key::ES_SCRATCH_SIZE;
    break;
  case CallingConv::AMDGPU_HS:
    ScratchSizeKey = PALMD::Key::HS_SCRATCH_SIZE;
    break;
  case CallingConv::AMDGPU_LS:
    ScratchSizeKey = PALMD::Key::LS_SCRATCH_SIZE;
    break;
  }
  // The *_NUM_USED_{V,S}GPRS keys sit at a fixed offset from the stage's
  // *_SCRATCH_SIZE key, so derive them from ScratchSizeKey using the VS keys
  // as the reference layout.
  unsigned NumUsedVgprsKey = ScratchSizeKey +
      PALMD::Key::VS_NUM_USED_VGPRS - PALMD::Key::VS_SCRATCH_SIZE;
  unsigned NumUsedSgprsKey = ScratchSizeKey +
      PALMD::Key::VS_NUM_USED_SGPRS - PALMD::Key::VS_SCRATCH_SIZE;
  PALMetadataMap[NumUsedVgprsKey] = CurrentProgramInfo.NumVGPRsForWavesPerEU;
  PALMetadataMap[NumUsedSgprsKey] = CurrentProgramInfo.NumSGPRsForWavesPerEU;
  if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
    PALMetadataMap[Rsrc1Reg] |= CurrentProgramInfo.ComputePGMRSrc1;
    PALMetadataMap[Rsrc2Reg] |= CurrentProgramInfo.ComputePGMRSrc2;
    // ScratchSize is in bytes, 16 aligned.
    PALMetadataMap[ScratchSizeKey] |=
        alignTo(CurrentProgramInfo.ScratchSize, 16);
  } else {
    PALMetadataMap[Rsrc1Reg] |= S_00B028_VGPRS(CurrentProgramInfo.VGPRBlocks) |
        S_00B028_SGPRS(CurrentProgramInfo.SGPRBlocks);
    if (CurrentProgramInfo.ScratchBlocks > 0)
      PALMetadataMap[Rsrc2Reg] |= S_00B84C_SCRATCH_EN(1);
    // ScratchSize is in bytes, 16 aligned.
    PALMetadataMap[ScratchSizeKey] |=
        alignTo(CurrentProgramInfo.ScratchSize, 16);
  }
  if (MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS) {
    PALMetadataMap[Rsrc2Reg] |=
        S_00B02C_EXTRA_LDS_SIZE(CurrentProgramInfo.LDSBlocks);
    PALMetadataMap[R_0286CC_SPI_PS_INPUT_ENA / 4] |= MFI->getPSInputEnable();
    PALMetadataMap[R_0286D0_SPI_PS_INPUT_ADDR / 4] |= MFI->getPSInputAddr();
  }
}
|
|
|
|
|
2016-02-12 10:40:47 +08:00
|
|
|
// This is supposed to be log2(Size)
|
|
|
|
static amd_element_byte_size_t getElementByteSizeValue(unsigned Size) {
|
|
|
|
switch (Size) {
|
|
|
|
case 4:
|
|
|
|
return AMD_ELEMENT_4_BYTES;
|
|
|
|
case 8:
|
|
|
|
return AMD_ELEMENT_8_BYTES;
|
|
|
|
case 16:
|
|
|
|
return AMD_ELEMENT_16_BYTES;
|
|
|
|
default:
|
|
|
|
llvm_unreachable("invalid private_element_size");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-23 06:54:39 +08:00
|
|
|
void AMDGPUAsmPrinter::getAmdKernelCode(amd_kernel_code_t &Out,
|
2017-05-03 01:14:00 +08:00
|
|
|
const SIProgramInfo &CurrentProgramInfo,
|
2017-03-23 06:54:39 +08:00
|
|
|
const MachineFunction &MF) const {
|
2018-07-20 17:05:08 +08:00
|
|
|
const Function &F = MF.getFunction();
|
|
|
|
assert(F.getCallingConv() == CallingConv::AMDGPU_KERNEL ||
|
|
|
|
F.getCallingConv() == CallingConv::SPIR_KERNEL);
|
|
|
|
|
2014-12-03 06:00:07 +08:00
|
|
|
const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
|
2018-07-12 04:59:01 +08:00
|
|
|
const GCNSubtarget &STM = MF.getSubtarget<GCNSubtarget>();
|
2014-12-03 06:00:07 +08:00
|
|
|
|
2017-03-23 06:54:39 +08:00
|
|
|
AMDGPU::initDefaultAMDKernelCodeT(Out, STM.getFeatureBits());
|
2014-12-03 06:00:07 +08:00
|
|
|
|
2017-03-23 06:54:39 +08:00
|
|
|
Out.compute_pgm_resource_registers =
|
2017-05-03 01:14:00 +08:00
|
|
|
CurrentProgramInfo.ComputePGMRSrc1 |
|
|
|
|
(CurrentProgramInfo.ComputePGMRSrc2 << 32);
|
2017-03-23 06:54:39 +08:00
|
|
|
Out.code_properties = AMD_CODE_PROPERTY_IS_PTR64;
|
2016-02-12 10:40:47 +08:00
|
|
|
|
2017-05-03 01:14:00 +08:00
|
|
|
if (CurrentProgramInfo.DynamicCallStack)
|
|
|
|
Out.code_properties |= AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK;
|
|
|
|
|
2017-03-23 06:54:39 +08:00
|
|
|
AMD_HSA_BITS_SET(Out.code_properties,
|
2016-02-12 10:40:47 +08:00
|
|
|
AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE,
|
|
|
|
getElementByteSizeValue(STM.getMaxPrivateElementSize()));
|
|
|
|
|
2015-12-01 05:16:03 +08:00
|
|
|
if (MFI->hasPrivateSegmentBuffer()) {
|
2017-03-23 06:54:39 +08:00
|
|
|
Out.code_properties |=
|
2015-12-01 05:16:03 +08:00
|
|
|
AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (MFI->hasDispatchPtr())
|
2017-03-23 06:54:39 +08:00
|
|
|
Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR;
|
2015-12-01 05:16:03 +08:00
|
|
|
|
|
|
|
if (MFI->hasQueuePtr())
|
2017-03-23 06:54:39 +08:00
|
|
|
Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR;
|
2015-12-01 05:16:03 +08:00
|
|
|
|
|
|
|
if (MFI->hasKernargSegmentPtr())
|
2017-03-23 06:54:39 +08:00
|
|
|
Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR;
|
2015-12-01 05:16:03 +08:00
|
|
|
|
|
|
|
if (MFI->hasDispatchID())
|
2017-03-23 06:54:39 +08:00
|
|
|
Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID;
|
2015-12-01 05:16:03 +08:00
|
|
|
|
|
|
|
if (MFI->hasFlatScratchInit())
|
2017-03-23 06:54:39 +08:00
|
|
|
Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT;
|
2015-12-01 05:16:03 +08:00
|
|
|
|
2015-11-26 08:43:29 +08:00
|
|
|
if (MFI->hasDispatchPtr())
|
2017-03-23 06:54:39 +08:00
|
|
|
Out.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR;
|
2015-11-26 08:43:29 +08:00
|
|
|
|
2016-06-25 11:11:28 +08:00
|
|
|
if (STM.debuggerSupported())
|
2017-03-23 06:54:39 +08:00
|
|
|
Out.code_properties |= AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED;
|
2016-06-25 11:11:28 +08:00
|
|
|
|
2016-01-05 07:35:53 +08:00
|
|
|
if (STM.isXNACKEnabled())
|
2017-03-23 06:54:39 +08:00
|
|
|
Out.code_properties |= AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED;
|
2016-01-05 07:35:53 +08:00
|
|
|
|
2018-07-20 17:05:08 +08:00
|
|
|
unsigned MaxKernArgAlign;
|
|
|
|
Out.kernarg_segment_byte_size = STM.getKernArgSegmentSize(F, MaxKernArgAlign);
|
2017-05-03 01:14:00 +08:00
|
|
|
Out.wavefront_sgpr_count = CurrentProgramInfo.NumSGPR;
|
|
|
|
Out.workitem_vgpr_count = CurrentProgramInfo.NumVGPR;
|
|
|
|
Out.workitem_private_segment_byte_size = CurrentProgramInfo.ScratchSize;
|
|
|
|
Out.workgroup_group_segment_byte_size = CurrentProgramInfo.LDSSize;
|
2014-12-03 06:00:07 +08:00
|
|
|
|
2016-12-07 05:53:10 +08:00
|
|
|
// These alignment values are specified in powers of two, so alignment =
|
|
|
|
// 2^n. The minimum alignment is 2^4 = 16.
|
2017-03-23 06:54:39 +08:00
|
|
|
Out.kernarg_segment_alignment = std::max((size_t)4,
|
2018-07-20 17:05:08 +08:00
|
|
|
countTrailingZeros(MaxKernArgAlign));
|
2016-12-07 05:53:10 +08:00
|
|
|
|
2016-06-25 11:11:28 +08:00
|
|
|
if (STM.debuggerEmitPrologue()) {
|
2017-03-23 06:54:39 +08:00
|
|
|
Out.debug_wavefront_private_segment_offset_sgpr =
|
2017-05-03 01:14:00 +08:00
|
|
|
CurrentProgramInfo.DebuggerWavefrontPrivateSegmentOffsetSGPR;
|
2017-03-23 06:54:39 +08:00
|
|
|
Out.debug_private_segment_buffer_sgpr =
|
2017-05-03 01:14:00 +08:00
|
|
|
CurrentProgramInfo.DebuggerPrivateSegmentBufferSGPR;
|
2016-06-25 11:11:28 +08:00
|
|
|
}
|
2014-12-03 06:00:07 +08:00
|
|
|
}
|
2015-04-08 09:09:26 +08:00
|
|
|
|
|
|
|
bool AMDGPUAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
|
|
|
|
unsigned AsmVariant,
|
|
|
|
const char *ExtraCode, raw_ostream &O) {
|
2017-08-10 04:09:35 +08:00
|
|
|
// First try the generic code, which knows about modifiers like 'c' and 'n'.
|
|
|
|
if (!AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O))
|
|
|
|
return false;
|
|
|
|
|
2015-04-08 09:09:26 +08:00
|
|
|
if (ExtraCode && ExtraCode[0]) {
|
|
|
|
if (ExtraCode[1] != 0)
|
|
|
|
return true; // Unknown modifier.
|
|
|
|
|
|
|
|
switch (ExtraCode[0]) {
|
|
|
|
case 'r':
|
|
|
|
break;
|
2017-08-10 04:09:35 +08:00
|
|
|
default:
|
|
|
|
return true;
|
2015-04-08 09:09:26 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-10 04:09:35 +08:00
|
|
|
// TODO: Should be able to support other operand types like globals.
|
|
|
|
const MachineOperand &MO = MI->getOperand(OpNo);
|
|
|
|
if (MO.isReg()) {
|
|
|
|
AMDGPUInstPrinter::printRegOperand(MO.getReg(), O,
|
|
|
|
*MF->getSubtarget().getRegisterInfo());
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
2015-04-08 09:09:26 +08:00
|
|
|
}
|