//===-- AMDGPUAsmPrinter.cpp - AMDGPU Assembly printer -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// The AMDGPUAsmPrinter is used to print both assembly string and also binary
/// code. When passed an MCAsmStreamer it prints assembly and when passed
/// an MCObjectStreamer it outputs binary code.
//
//===----------------------------------------------------------------------===//
//

#include "AMDGPUAsmPrinter.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "InstPrinter/AMDGPUInstPrinter.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "AMDGPU.h"
#include "AMDKernelCodeT.h"
#include "AMDGPUSubtarget.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"

using namespace llvm;

// TODO: This should get the default rounding mode from the kernel. We just set
// the default here, but this could change if the OpenCL rounding mode pragmas
// are used.
//
// The denormal mode here should match what is reported by the OpenCL runtime
// for the CL_FP_DENORM bit from CL_DEVICE_{HALF|SINGLE|DOUBLE}_FP_CONFIG, but
// can also be overridden to flush with the -cl-denorms-are-zero compiler flag.
//
// AMD OpenCL only sets flush none and reports CL_FP_DENORM for double
// precision, and leaves single precision to flush all and does not report
// CL_FP_DENORM for CL_DEVICE_SINGLE_FP_CONFIG. Mesa's OpenCL currently reports
// CL_FP_DENORM for both.
//
// FIXME: It seems some instructions do not support single precision denormals
// regardless of the mode (exp_*_f32, rcp_*_f32, rsq_*_f32, sqrt_f32, and
// sin_f32, cos_f32 on most parts).
//
// We want to use these instructions, and using fp32 denormals also causes
// instructions to run at the double precision rate for the device, so it's
// probably best to just report no single precision denormals.
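//
// As a rough worked example (assuming the FP_ROUND_* / FP_DENORM_* encodings
// from SIDefines.h): round-to-nearest-even for both precisions, fp32 denormals
// flushed, and fp64 denormals enabled packs into a FloatMode value of 0xC0
// (192), which is what the default subtarget settings produce.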
static uint32_t getFPMode(const MachineFunction &F) {
  const AMDGPUSubtarget& ST = F.getSubtarget<AMDGPUSubtarget>();
  // TODO: Is there any real use for the flush in only / flush out only modes?

  uint32_t FP32Denormals =
    ST.hasFP32Denormals() ? FP_DENORM_FLUSH_NONE : FP_DENORM_FLUSH_IN_FLUSH_OUT;

  uint32_t FP64Denormals =
    ST.hasFP64Denormals() ? FP_DENORM_FLUSH_NONE : FP_DENORM_FLUSH_IN_FLUSH_OUT;

  return FP_ROUND_MODE_SP(FP_ROUND_ROUND_TO_NEAREST) |
         FP_ROUND_MODE_DP(FP_ROUND_ROUND_TO_NEAREST) |
         FP_DENORM_MODE_SP(FP32Denormals) |
         FP_DENORM_MODE_DP(FP64Denormals);
}

static AsmPrinter *
createAMDGPUAsmPrinterPass(TargetMachine &tm,
                           std::unique_ptr<MCStreamer> &&Streamer) {
  return new AMDGPUAsmPrinter(tm, std::move(Streamer));
}

extern "C" void LLVMInitializeAMDGPUAsmPrinter() {
  TargetRegistry::RegisterAsmPrinter(TheAMDGPUTarget, createAMDGPUAsmPrinterPass);
  TargetRegistry::RegisterAsmPrinter(TheGCNTarget, createAMDGPUAsmPrinterPass);
}

AMDGPUAsmPrinter::AMDGPUAsmPrinter(TargetMachine &TM,
                                   std::unique_ptr<MCStreamer> Streamer)
    : AsmPrinter(TM, std::move(Streamer)) {}

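// For AMDHSA targets, emit the HSA code object version and ISA directives at
// the top of the assembly file; other OSes get no file-level prologue.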
void AMDGPUAsmPrinter::EmitStartOfAsmFile(Module &M) {
  if (TM.getTargetTriple().getOS() != Triple::AMDHSA)
    return;

  // Need to construct an MCSubtargetInfo here in case we have no functions
  // in the module.
  std::unique_ptr<MCSubtargetInfo> STI(TM.getTarget().createMCSubtargetInfo(
        TM.getTargetTriple().str(), TM.getTargetCPU(),
        TM.getTargetFeatureString()));

  AMDGPUTargetStreamer *TS =
      static_cast<AMDGPUTargetStreamer *>(OutStreamer->getTargetStreamer());

  TS->EmitDirectiveHSACodeObjectVersion(1, 0);
  AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(STI->getFeatureBits());
  TS->EmitDirectiveHSACodeObjectISA(ISA.Major, ISA.Minor, ISA.Stepping,
                                    "AMD", "AMDGPU");
}

void AMDGPUAsmPrinter::EmitFunctionBodyStart() {
  const AMDGPUSubtarget &STM = MF->getSubtarget<AMDGPUSubtarget>();
  SIProgramInfo KernelInfo;
  if (STM.isAmdHsaOS()) {
    getSIProgramInfo(KernelInfo, *MF);
    EmitAmdKernelCodeT(*MF, KernelInfo);
  }
}

void AMDGPUAsmPrinter::EmitFunctionEntryLabel() {
  const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const AMDGPUSubtarget &STM = MF->getSubtarget<AMDGPUSubtarget>();
  if (MFI->isKernel() && STM.isAmdHsaOS()) {
    AMDGPUTargetStreamer *TS =
        static_cast<AMDGPUTargetStreamer *>(OutStreamer->getTargetStreamer());
    TS->EmitAMDGPUSymbolType(CurrentFnSym->getName(),
                             ELF::STT_AMDGPU_HSA_KERNEL);
  }

  AsmPrinter::EmitFunctionEntryLabel();
}

static bool isModuleLinkage(const GlobalValue *GV) {
  switch (GV->getLinkage()) {
  case GlobalValue::LinkOnceODRLinkage:
  case GlobalValue::LinkOnceAnyLinkage:
  case GlobalValue::InternalLinkage:
  case GlobalValue::CommonLinkage:
    return true;
  case GlobalValue::ExternalLinkage:
    return false;
  default: llvm_unreachable("unknown linkage type");
  }
}

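// For AMDHSA, globals are announced to the target streamer as module-scope or
// program-scope symbols (based on their linkage) and emitted with an explicit
// ELF size; non-HSA targets fall back to the default AsmPrinter handling.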
void AMDGPUAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
  if (TM.getTargetTriple().getOS() != Triple::AMDHSA) {
    AsmPrinter::EmitGlobalVariable(GV);
    return;
  }

  if (GV->isDeclaration() || GV->getLinkage() == GlobalValue::PrivateLinkage) {
    AsmPrinter::EmitGlobalVariable(GV);
    return;
  }

  // Group segment variables aren't emitted in HSA.
  if (AMDGPU::isGroupSegment(GV))
    return;

  AMDGPUTargetStreamer *TS =
      static_cast<AMDGPUTargetStreamer *>(OutStreamer->getTargetStreamer());
  if (isModuleLinkage(GV)) {
    TS->EmitAMDGPUHsaModuleScopeGlobal(GV->getName());
  } else {
    TS->EmitAMDGPUHsaProgramScopeGlobal(GV->getName());
  }

  MCSymbolELF *GVSym = cast<MCSymbolELF>(getSymbol(GV));
  const DataLayout &DL = getDataLayout();

  // Emit the size.
  uint64_t Size = DL.getTypeAllocSize(GV->getType()->getElementType());
  OutStreamer->emitELFSize(GVSym, MCConstantExpr::create(Size, OutContext));
  OutStreamer->PushSection();
  OutStreamer->SwitchSection(
      getObjFileLowering().SectionForGlobal(GV, *Mang, TM));
  const Constant *C = GV->getInitializer();
  OutStreamer->EmitLabel(GVSym);
  EmitGlobalConstant(DL, C);
  OutStreamer->PopSection();
}

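// Emits the per-function output: the .AMDGPU.config words consumed by the
// driver, the function body itself, verbose resource-usage comments in
// .AMDGPU.csdata, and, when DumpCode is enabled, a disassembly listing in
// .AMDGPU.disasm.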
bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {

  // The starting address of all shader programs must be 256-byte aligned.
  // MF.setAlignment takes a log2 value, so 8 here means 1 << 8 = 256 bytes.
  MF.setAlignment(8);

  SetupMachineFunction(MF);

  MCContext &Context = getObjFileLowering().getContext();
  MCSectionELF *ConfigSection =
      Context.getELFSection(".AMDGPU.config", ELF::SHT_PROGBITS, 0);
  OutStreamer->SwitchSection(ConfigSection);

  const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
  SIProgramInfo KernelInfo;
  if (STM.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
    getSIProgramInfo(KernelInfo, MF);
    if (!STM.isAmdHsaOS()) {
      EmitProgramInfoSI(MF, KernelInfo);
    }
  } else {
    EmitProgramInfoR600(MF);
  }

  DisasmLines.clear();
  HexLines.clear();
  DisasmLineMaxLen = 0;

  EmitFunctionBody();

  if (isVerbose()) {
    MCSectionELF *CommentSection =
        Context.getELFSection(".AMDGPU.csdata", ELF::SHT_PROGBITS, 0);
    OutStreamer->SwitchSection(CommentSection);

    if (STM.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      OutStreamer->emitRawComment(" Kernel info:", false);
      OutStreamer->emitRawComment(" codeLenInByte = " + Twine(KernelInfo.CodeLen),
                                  false);
      OutStreamer->emitRawComment(" NumSgprs: " + Twine(KernelInfo.NumSGPR),
                                  false);
      OutStreamer->emitRawComment(" NumVgprs: " + Twine(KernelInfo.NumVGPR),
                                  false);
      OutStreamer->emitRawComment(" FloatMode: " + Twine(KernelInfo.FloatMode),
                                  false);
      OutStreamer->emitRawComment(" IeeeMode: " + Twine(KernelInfo.IEEEMode),
                                  false);
      OutStreamer->emitRawComment(" ScratchSize: " + Twine(KernelInfo.ScratchSize),
                                  false);
      OutStreamer->emitRawComment(" LDSByteSize: " + Twine(KernelInfo.LDSSize) +
                                  " bytes/workgroup (compile time only)", false);

      OutStreamer->emitRawComment(" COMPUTE_PGM_RSRC2:USER_SGPR: " +
                                  Twine(G_00B84C_USER_SGPR(KernelInfo.ComputePGMRSrc2)),
                                  false);
      OutStreamer->emitRawComment(" COMPUTE_PGM_RSRC2:TGID_X_EN: " +
                                  Twine(G_00B84C_TGID_X_EN(KernelInfo.ComputePGMRSrc2)),
                                  false);
      OutStreamer->emitRawComment(" COMPUTE_PGM_RSRC2:TGID_Y_EN: " +
                                  Twine(G_00B84C_TGID_Y_EN(KernelInfo.ComputePGMRSrc2)),
                                  false);
      OutStreamer->emitRawComment(" COMPUTE_PGM_RSRC2:TGID_Z_EN: " +
                                  Twine(G_00B84C_TGID_Z_EN(KernelInfo.ComputePGMRSrc2)),
                                  false);
      OutStreamer->emitRawComment(" COMPUTE_PGM_RSRC2:TIDIG_COMP_CNT: " +
                                  Twine(G_00B84C_TIDIG_COMP_CNT(KernelInfo.ComputePGMRSrc2)),
                                  false);
    } else {
      R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
      OutStreamer->emitRawComment(
        Twine("SQ_PGM_RESOURCES:STACK_SIZE = " + Twine(MFI->StackSize)));
    }
  }

  if (STM.dumpCode()) {

    OutStreamer->SwitchSection(
        Context.getELFSection(".AMDGPU.disasm", ELF::SHT_NOTE, 0));

    for (size_t i = 0; i < DisasmLines.size(); ++i) {
      std::string Comment(DisasmLineMaxLen - DisasmLines[i].size(), ' ');
      Comment += " ; " + HexLines[i] + "\n";

      OutStreamer->EmitBytes(StringRef(DisasmLines[i]));
      OutStreamer->EmitBytes(StringRef(Comment));
    }
  }

  return false;
}

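// Emits the R600/Evergreen .AMDGPU.config entries: the SQ_PGM_RESOURCES value
// (GPR count and stack size) for the shader's calling convention, the
// DB_SHADER_CONTROL kill bit, and, for compute shaders, the LDS allocation.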
void AMDGPUAsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) {
  unsigned MaxGPR = 0;
  bool killPixel = false;
  const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
  const R600RegisterInfo *RI =
      static_cast<const R600RegisterInfo *>(STM.getRegisterInfo());
  const R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();

  for (const MachineBasicBlock &MBB : MF) {
    for (const MachineInstr &MI : MBB) {
      if (MI.getOpcode() == AMDGPU::KILLGT)
        killPixel = true;
      unsigned numOperands = MI.getNumOperands();
      for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) {
        const MachineOperand &MO = MI.getOperand(op_idx);
        if (!MO.isReg())
          continue;
        unsigned HWReg = RI->getEncodingValue(MO.getReg()) & 0xff;

        // Registers with an encoding value > 127 aren't GPRs.
        if (HWReg > 127)
          continue;
        MaxGPR = std::max(MaxGPR, HWReg);
      }
    }
  }

  unsigned RsrcReg;
  if (STM.getGeneration() >= AMDGPUSubtarget::EVERGREEN) {
    // Evergreen / Northern Islands
    switch (MF.getFunction()->getCallingConv()) {
    default: // Fall through
    case CallingConv::AMDGPU_CS: RsrcReg = R_0288D4_SQ_PGM_RESOURCES_LS; break;
    case CallingConv::AMDGPU_GS: RsrcReg = R_028878_SQ_PGM_RESOURCES_GS; break;
    case CallingConv::AMDGPU_PS: RsrcReg = R_028844_SQ_PGM_RESOURCES_PS; break;
    case CallingConv::AMDGPU_VS: RsrcReg = R_028860_SQ_PGM_RESOURCES_VS; break;
    }
  } else {
    // R600 / R700
    switch (MF.getFunction()->getCallingConv()) {
    default: // Fall through
    case CallingConv::AMDGPU_GS: // Fall through
    case CallingConv::AMDGPU_CS: // Fall through
    case CallingConv::AMDGPU_VS: RsrcReg = R_028868_SQ_PGM_RESOURCES_VS; break;
    case CallingConv::AMDGPU_PS: RsrcReg = R_028850_SQ_PGM_RESOURCES_PS; break;
    }
  }

  OutStreamer->EmitIntValue(RsrcReg, 4);
  OutStreamer->EmitIntValue(S_NUM_GPRS(MaxGPR + 1) |
                            S_STACK_SIZE(MFI->StackSize), 4);
  OutStreamer->EmitIntValue(R_02880C_DB_SHADER_CONTROL, 4);
  OutStreamer->EmitIntValue(S_02880C_KILL_ENABLE(killPixel), 4);

  if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
    OutStreamer->EmitIntValue(R_0288E8_SQ_LDS_ALLOC, 4);
    OutStreamer->EmitIntValue(alignTo(MFI->LDSSize, 4) >> 2, 4);
  }
}

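// Scans the emitted machine code to derive the SI program info: the highest
// SGPR/VGPR actually referenced (plus the architectural registers reserved
// above them), an estimated code size, scratch and LDS usage, and the packed
// COMPUTE_PGM_RSRC1/RSRC2 register values.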
void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
                                        const MachineFunction &MF) const {
  const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  uint64_t CodeSize = 0;
  unsigned MaxSGPR = 0;
  unsigned MaxVGPR = 0;
  bool VCCUsed = false;
  bool FlatUsed = false;
  const SIRegisterInfo *RI =
      static_cast<const SIRegisterInfo *>(STM.getRegisterInfo());

  for (const MachineBasicBlock &MBB : MF) {
    for (const MachineInstr &MI : MBB) {
      // TODO: CodeSize should account for multiple functions.

      // TODO: Should we count size of debug info?
      if (MI.isDebugValue())
        continue;

      // FIXME: This is reporting 0 for many instructions.
      CodeSize += MI.getDesc().Size;

      unsigned numOperands = MI.getNumOperands();
      for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) {
        const MachineOperand &MO = MI.getOperand(op_idx);
        unsigned width = 0;
        bool isSGPR = false;

        if (!MO.isReg())
          continue;

        unsigned reg = MO.getReg();
        switch (reg) {
        case AMDGPU::EXEC:
        case AMDGPU::SCC:
        case AMDGPU::M0:
          continue;

        case AMDGPU::VCC:
        case AMDGPU::VCC_LO:
        case AMDGPU::VCC_HI:
          VCCUsed = true;
          continue;

        case AMDGPU::FLAT_SCR:
        case AMDGPU::FLAT_SCR_LO:
        case AMDGPU::FLAT_SCR_HI:
          FlatUsed = true;
          continue;

        case AMDGPU::TBA:
        case AMDGPU::TBA_LO:
        case AMDGPU::TBA_HI:
        case AMDGPU::TMA:
        case AMDGPU::TMA_LO:
        case AMDGPU::TMA_HI:
          llvm_unreachable("Trap Handler registers should not be used");
          continue;

        default:
          break;
        }

        if (AMDGPU::SReg_32RegClass.contains(reg)) {
          if (AMDGPU::TTMP_32RegClass.contains(reg)) {
            llvm_unreachable("Trap Handler registers should not be used");
          }
          isSGPR = true;
          width = 1;
        } else if (AMDGPU::VGPR_32RegClass.contains(reg)) {
          isSGPR = false;
          width = 1;
        } else if (AMDGPU::SReg_64RegClass.contains(reg)) {
          if (AMDGPU::TTMP_64RegClass.contains(reg)) {
            llvm_unreachable("Trap Handler registers should not be used");
          }
          isSGPR = true;
          width = 2;
        } else if (AMDGPU::VReg_64RegClass.contains(reg)) {
          isSGPR = false;
          width = 2;
        } else if (AMDGPU::VReg_96RegClass.contains(reg)) {
          isSGPR = false;
          width = 3;
        } else if (AMDGPU::SReg_128RegClass.contains(reg)) {
          isSGPR = true;
          width = 4;
        } else if (AMDGPU::VReg_128RegClass.contains(reg)) {
          isSGPR = false;
          width = 4;
        } else if (AMDGPU::SReg_256RegClass.contains(reg)) {
          isSGPR = true;
          width = 8;
        } else if (AMDGPU::VReg_256RegClass.contains(reg)) {
          isSGPR = false;
          width = 8;
        } else if (AMDGPU::SReg_512RegClass.contains(reg)) {
          isSGPR = true;
          width = 16;
        } else if (AMDGPU::VReg_512RegClass.contains(reg)) {
          isSGPR = false;
          width = 16;
        } else {
          llvm_unreachable("Unknown register class");
        }
        unsigned hwReg = RI->getEncodingValue(reg) & 0xff;
        unsigned maxUsed = hwReg + width - 1;
        if (isSGPR) {
          MaxSGPR = maxUsed > MaxSGPR ? maxUsed : MaxSGPR;
        } else {
          MaxVGPR = maxUsed > MaxVGPR ? maxUsed : MaxVGPR;
        }
      }
    }
  }

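  // Account for the architectural SGPRs that sit at the top of the allocated
  // range even though they are not ordinary SGPRs: vcc occupies the top two
  // slots, and on VI xnack_mask is always reserved directly below vcc with
  // flat_scratch fixed below that, so using flat_scratch there costs six extra
  // SGPRs instead of the four needed on earlier generations.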
  unsigned ExtraSGPRs = 0;

  if (VCCUsed)
    ExtraSGPRs = 2;

  if (STM.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    if (FlatUsed)
      ExtraSGPRs = 4;
  } else {
    if (STM.isXNACKEnabled())
      ExtraSGPRs = 4;

    if (FlatUsed)
      ExtraSGPRs = 6;
  }

  MaxSGPR += ExtraSGPRs;

  // We found the maximum register index. They start at 0, so add one to get the
  // number of registers.
  ProgInfo.NumVGPR = MaxVGPR + 1;
  ProgInfo.NumSGPR = MaxSGPR + 1;

  if (STM.hasSGPRInitBug()) {
    if (ProgInfo.NumSGPR > AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG) {
      LLVMContext &Ctx = MF.getFunction()->getContext();
      Ctx.emitError("too many SGPRs used with the SGPR init bug");
    }

    ProgInfo.NumSGPR = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG;
  }

  if (MFI->NumUserSGPRs > STM.getMaxNumUserSGPRs()) {
    LLVMContext &Ctx = MF.getFunction()->getContext();
    Ctx.emitError("too many user SGPRs used");
  }

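  // The RSRC1 register-count fields are encoded in hardware granules of four
  // VGPRs and eight SGPRs, stored as (granules - 1); e.g. 23 VGPRs rounds up
  // to six granules and is encoded as 5.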
  ProgInfo.VGPRBlocks = (ProgInfo.NumVGPR - 1) / 4;
  ProgInfo.SGPRBlocks = (ProgInfo.NumSGPR - 1) / 8;

  // Set the value to initialize FP_ROUND and FP_DENORM parts of the mode
  // register.
  ProgInfo.FloatMode = getFPMode(MF);

  ProgInfo.IEEEMode = 0;

  // Make the clamp modifier return 0 on NaN input.
  ProgInfo.DX10Clamp = 1;

  const MachineFrameInfo *FrameInfo = MF.getFrameInfo();
  ProgInfo.ScratchSize = FrameInfo->getStackSize();

  ProgInfo.FlatUsed = FlatUsed;
  ProgInfo.VCCUsed = VCCUsed;
  ProgInfo.CodeLen = CodeSize;

  unsigned LDSAlignShift;
  if (STM.getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
    // LDS is allocated in 64 dword blocks.
    LDSAlignShift = 8;
  } else {
    // LDS is allocated in 128 dword blocks.
    LDSAlignShift = 9;
  }

  unsigned LDSSpillSize = MFI->LDSWaveSpillSize *
                          MFI->getMaximumWorkGroupSize(MF);

  ProgInfo.LDSSize = MFI->LDSSize + LDSSpillSize;
  ProgInfo.LDSBlocks =
      alignTo(ProgInfo.LDSSize, 1ULL << LDSAlignShift) >> LDSAlignShift;

  // Scratch is allocated in 256 dword blocks.
  unsigned ScratchAlignShift = 10;
  // We need to program the hardware with the amount of scratch memory that
  // is used by the entire wave. ProgInfo.ScratchSize is the amount of
  // scratch memory used per thread.
  ProgInfo.ScratchBlocks =
      alignTo(ProgInfo.ScratchSize * STM.getWavefrontSize(),
              1ULL << ScratchAlignShift) >>
      ScratchAlignShift;

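  // For example, 16 bytes of scratch per work-item on a 64-lane wavefront is
  // 1024 bytes for the whole wave, i.e. exactly one block at this granularity.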
  ProgInfo.ComputePGMRSrc1 =
      S_00B848_VGPRS(ProgInfo.VGPRBlocks) |
      S_00B848_SGPRS(ProgInfo.SGPRBlocks) |
      S_00B848_PRIORITY(ProgInfo.Priority) |
      S_00B848_FLOAT_MODE(ProgInfo.FloatMode) |
      S_00B848_PRIV(ProgInfo.Priv) |
      S_00B848_DX10_CLAMP(ProgInfo.DX10Clamp) |
      S_00B848_DEBUG_MODE(ProgInfo.DebugMode) |
      S_00B848_IEEE_MODE(ProgInfo.IEEEMode);

  // 0 = X, 1 = XY, 2 = XYZ
  unsigned TIDIGCompCnt = 0;
  if (MFI->hasWorkItemIDZ())
    TIDIGCompCnt = 2;
  else if (MFI->hasWorkItemIDY())
    TIDIGCompCnt = 1;

  ProgInfo.ComputePGMRSrc2 =
      S_00B84C_SCRATCH_EN(ProgInfo.ScratchBlocks > 0) |
      S_00B84C_USER_SGPR(MFI->getNumUserSGPRs()) |
      S_00B84C_TGID_X_EN(MFI->hasWorkGroupIDX()) |
      S_00B84C_TGID_Y_EN(MFI->hasWorkGroupIDY()) |
      S_00B84C_TGID_Z_EN(MFI->hasWorkGroupIDZ()) |
      S_00B84C_TG_SIZE_EN(MFI->hasWorkGroupInfo()) |
      S_00B84C_TIDIG_COMP_CNT(TIDIGCompCnt) |
      S_00B84C_EXCP_EN_MSB(0) |
      S_00B84C_LDS_SIZE(ProgInfo.LDSBlocks) |
      S_00B84C_EXCP_EN(0);
}

static unsigned getRsrcReg(CallingConv::ID CallConv) {
  switch (CallConv) {
  default: // Fall through
  case CallingConv::AMDGPU_CS: return R_00B848_COMPUTE_PGM_RSRC1;
  case CallingConv::AMDGPU_GS: return R_00B228_SPI_SHADER_PGM_RSRC1_GS;
  case CallingConv::AMDGPU_PS: return R_00B028_SPI_SHADER_PGM_RSRC1_PS;
  case CallingConv::AMDGPU_VS: return R_00B128_SPI_SHADER_PGM_RSRC1_VS;
  }
}

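// Emits the SI .AMDGPU.config entries used by non-HSA drivers: for compute
// kernels the COMPUTE_PGM_RSRC1/RSRC2 values and scratch ring size, for
// graphics shaders the per-stage RSRC1 value, and for pixel shaders the extra
// LDS size and the SPI input enable/address words.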
void AMDGPUAsmPrinter::EmitProgramInfoSI(const MachineFunction &MF,
                                         const SIProgramInfo &KernelInfo) {
  const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  unsigned RsrcReg = getRsrcReg(MF.getFunction()->getCallingConv());

  if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
    OutStreamer->EmitIntValue(R_00B848_COMPUTE_PGM_RSRC1, 4);
    OutStreamer->EmitIntValue(KernelInfo.ComputePGMRSrc1, 4);

    OutStreamer->EmitIntValue(R_00B84C_COMPUTE_PGM_RSRC2, 4);
    OutStreamer->EmitIntValue(KernelInfo.ComputePGMRSrc2, 4);

    OutStreamer->EmitIntValue(R_00B860_COMPUTE_TMPRING_SIZE, 4);
    OutStreamer->EmitIntValue(S_00B860_WAVESIZE(KernelInfo.ScratchBlocks), 4);

    // TODO: Should probably note flat usage somewhere. SC emits a "FlatPtr32 =
    // 0" comment but I don't see a corresponding field in the register spec.
  } else {
    OutStreamer->EmitIntValue(RsrcReg, 4);
    OutStreamer->EmitIntValue(S_00B028_VGPRS(KernelInfo.VGPRBlocks) |
                              S_00B028_SGPRS(KernelInfo.SGPRBlocks), 4);
    if (STM.isVGPRSpillingEnabled(*MF.getFunction())) {
      OutStreamer->EmitIntValue(R_0286E8_SPI_TMPRING_SIZE, 4);
      OutStreamer->EmitIntValue(S_0286E8_WAVESIZE(KernelInfo.ScratchBlocks), 4);
    }
  }

  if (MF.getFunction()->getCallingConv() == CallingConv::AMDGPU_PS) {
    OutStreamer->EmitIntValue(R_00B02C_SPI_SHADER_PGM_RSRC2_PS, 4);
    OutStreamer->EmitIntValue(S_00B02C_EXTRA_LDS_SIZE(KernelInfo.LDSBlocks), 4);
    OutStreamer->EmitIntValue(R_0286CC_SPI_PS_INPUT_ENA, 4);
    OutStreamer->EmitIntValue(MFI->PSInputEna, 4);
    OutStreamer->EmitIntValue(R_0286D0_SPI_PS_INPUT_ADDR, 4);
    OutStreamer->EmitIntValue(MFI->getPSInputAddr(), 4);
  }
}

// This is supposed to be log2(Size)
static amd_element_byte_size_t getElementByteSizeValue(unsigned Size) {
  switch (Size) {
  case 4:
    return AMD_ELEMENT_4_BYTES;
  case 8:
    return AMD_ELEMENT_8_BYTES;
  case 16:
    return AMD_ELEMENT_16_BYTES;
  default:
    llvm_unreachable("invalid private_element_size");
  }
}

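// Fills in and emits the amd_kernel_code_t header for an HSA kernel: the
// packed compute program resource registers (RSRC1 in the low 32 bits, RSRC2
// in the high 32 bits), the code properties describing which user SGPRs the
// kernel expects, and the kernarg, scratch, and LDS segment sizes.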
void AMDGPUAsmPrinter::EmitAmdKernelCodeT(const MachineFunction &MF,
                                          const SIProgramInfo &KernelInfo) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
  amd_kernel_code_t header;

  AMDGPU::initDefaultAMDKernelCodeT(header, STM.getFeatureBits());

  header.compute_pgm_resource_registers =
      KernelInfo.ComputePGMRSrc1 |
      (KernelInfo.ComputePGMRSrc2 << 32);
  header.code_properties = AMD_CODE_PROPERTY_IS_PTR64;

  AMD_HSA_BITS_SET(header.code_properties,
                   AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE,
                   getElementByteSizeValue(STM.getMaxPrivateElementSize()));

  if (MFI->hasPrivateSegmentBuffer()) {
    header.code_properties |=
      AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER;
  }

  if (MFI->hasDispatchPtr())
    header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR;

  if (MFI->hasQueuePtr())
    header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR;

  if (MFI->hasKernargSegmentPtr())
    header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR;

  if (MFI->hasDispatchID())
    header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID;

  if (MFI->hasFlatScratchInit())
    header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT;

  // TODO: Private segment size

  if (MFI->hasGridWorkgroupCountX()) {
    header.code_properties |=
      AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X;
  }

  if (MFI->hasGridWorkgroupCountY()) {
    header.code_properties |=
      AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y;
  }

  if (MFI->hasGridWorkgroupCountZ()) {
    header.code_properties |=
      AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z;
  }

  if (STM.isXNACKEnabled())
    header.code_properties |= AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED;

  header.kernarg_segment_byte_size = MFI->ABIArgOffset;
  header.wavefront_sgpr_count = KernelInfo.NumSGPR;
  header.workitem_vgpr_count = KernelInfo.NumVGPR;
  header.workitem_private_segment_byte_size = KernelInfo.ScratchSize;
  header.workgroup_group_segment_byte_size = KernelInfo.LDSSize;

  AMDGPUTargetStreamer *TS =
      static_cast<AMDGPUTargetStreamer *>(OutStreamer->getTargetStreamer());
  TS->EmitAMDKernelCodeT(header);
}

bool AMDGPUAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
                                       unsigned AsmVariant,
                                       const char *ExtraCode, raw_ostream &O) {
  if (ExtraCode && ExtraCode[0]) {
    if (ExtraCode[1] != 0)
      return true; // Unknown modifier.

    switch (ExtraCode[0]) {
    default:
      // See if this is a generic print operand
      return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
    case 'r':
      break;
    }
  }

  AMDGPUInstPrinter::printRegOperand(MI->getOperand(OpNo).getReg(), O,
                   *TM.getSubtargetImpl(*MF->getFunction())->getRegisterInfo());
  return false;
}