llvm-project/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp


//===-- NVPTXAsmPrinter.cpp - NVPTX LLVM assembly writer ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a printer that converts from our internal representation
// of machine-dependent LLVM code to NVPTX assembly language.
//
//===----------------------------------------------------------------------===//
#include "NVPTXAsmPrinter.h"
#include "InstPrinter/NVPTXInstPrinter.h"
#include "MCTargetDesc/NVPTXMCAsmInfo.h"
#include "NVPTX.h"
#include "NVPTXInstrInfo.h"
#include "NVPTXMCExpr.h"
#include "NVPTXMachineFunctionInfo.h"
#include "NVPTXRegisterInfo.h"
#include "NVPTXTargetMachine.h"
#include "NVPTXUtilities.h"
#include "cl_common_defines.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TimeValue.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"
#include <sstream>
using namespace llvm;
#define DEPOTNAME "__local_depot"
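// Illustrative: a function with function number 0 and a 16-byte frame gets a
// depot named "__local_depot0[16]" (see setAndEmitFunctionVirtualRegisters).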
static cl::opt<bool>
EmitLineNumbers("nvptx-emit-line-numbers", cl::Hidden,
cl::desc("NVPTX Specific: Emit Line numbers even without -G"),
cl::init(true));
static cl::opt<bool>
InterleaveSrc("nvptx-emit-src", cl::ZeroOrMore, cl::Hidden,
cl::desc("NVPTX Specific: Emit source line in ptx file"),
cl::init(false));
namespace {
/// DiscoverDependentGlobals - Return a set of GlobalVariables on which \p V
/// depends.
void DiscoverDependentGlobals(const Value *V,
DenseSet<const GlobalVariable *> &Globals) {
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
Globals.insert(GV);
else {
if (const User *U = dyn_cast<User>(V)) {
for (unsigned i = 0, e = U->getNumOperands(); i != e; ++i) {
DiscoverDependentGlobals(U->getOperand(i), Globals);
}
}
}
}
/// VisitGlobalVariableForEmission - Add \p GV to the list of GlobalVariable
/// instances to be emitted, but only after any dependents have been added
/// first.
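/// For example (illustrative): given "@a = global i32* @b", visiting @a first
/// recurses into @b, so the emission order becomes [@b, @a]. ptxas cannot
/// resolve forward references, so dependents must be printed first.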
void VisitGlobalVariableForEmission(
const GlobalVariable *GV, SmallVectorImpl<const GlobalVariable *> &Order,
DenseSet<const GlobalVariable *> &Visited,
DenseSet<const GlobalVariable *> &Visiting) {
// Have we already visited this one?
if (Visited.count(GV))
return;
// Do we have a circular dependency?
if (!Visiting.insert(GV).second)
report_fatal_error("Circular dependency found in global variable set");
// Make sure we visit all dependents first
DenseSet<const GlobalVariable *> Others;
for (unsigned i = 0, e = GV->getNumOperands(); i != e; ++i)
DiscoverDependentGlobals(GV->getOperand(i), Others);
for (DenseSet<const GlobalVariable *>::iterator I = Others.begin(),
E = Others.end();
I != E; ++I)
VisitGlobalVariableForEmission(*I, Order, Visited, Visiting);
// Now we can visit ourself
Order.push_back(GV);
Visited.insert(GV);
Visiting.erase(GV);
}
}
void NVPTXAsmPrinter::emitLineNumberAsDotLoc(const MachineInstr &MI) {
if (!EmitLineNumbers)
return;
if (ignoreLoc(MI))
return;
DebugLoc curLoc = MI.getDebugLoc();
if (prevDebugLoc.isUnknown() && curLoc.isUnknown())
return;
if (prevDebugLoc == curLoc)
return;
prevDebugLoc = curLoc;
if (curLoc.isUnknown())
return;
const MachineFunction *MF = MI.getParent()->getParent();
//const TargetMachine &TM = MF->getTarget();
const LLVMContext &ctx = MF->getFunction()->getContext();
DIScope Scope(curLoc.getScope(ctx));
assert((!Scope || Scope.isScope()) &&
"Scope of a DebugLoc should be null or a DIScope.");
if (!Scope)
return;
StringRef fileName(Scope.getFilename());
StringRef dirName(Scope.getDirectory());
SmallString<128> FullPathName = dirName;
if (!dirName.empty() && !sys::path::is_absolute(fileName)) {
sys::path::append(FullPathName, fileName);
fileName = FullPathName;
}
if (filenameMap.find(fileName) == filenameMap.end())
return;
// Emit the line from the source file.
if (InterleaveSrc)
this->emitSrcInText(fileName, curLoc.getLine());
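// For example, file index 1 at line 42, column 7 produces "\t.loc 1 42 7".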
std::stringstream temp;
temp << "\t.loc " << filenameMap[fileName] << " " << curLoc.getLine()
<< " " << curLoc.getCol();
OutStreamer.EmitRawText(temp.str());
}
void NVPTXAsmPrinter::EmitInstruction(const MachineInstr *MI) {
SmallString<128> Str;
raw_svector_ostream OS(Str);
if (static_cast<NVPTXTargetMachine &>(TM).getDrvInterface() == NVPTX::CUDA)
emitLineNumberAsDotLoc(*MI);
MCInst Inst;
lowerToMCInst(MI, Inst);
EmitToStreamer(OutStreamer, Inst);
}
// Handle symbol backtracking for targets that do not support image handles
bool NVPTXAsmPrinter::lowerImageHandleOperand(const MachineInstr *MI,
unsigned OpNo, MCOperand &MCOp) {
const MachineOperand &MO = MI->getOperand(OpNo);
const MCInstrDesc &MCID = MI->getDesc();
if (MCID.TSFlags & NVPTXII::IsTexFlag) {
// This is a texture fetch, so operand 4 is a texref and operand 5 is
// a samplerref
if (OpNo == 4 && MO.isImm()) {
lowerImageHandleSymbol(MO.getImm(), MCOp);
return true;
}
if (OpNo == 5 && MO.isImm() && !(MCID.TSFlags & NVPTXII::IsTexModeUnifiedFlag)) {
lowerImageHandleSymbol(MO.getImm(), MCOp);
return true;
}
return false;
} else if (MCID.TSFlags & NVPTXII::IsSuldMask) {
unsigned VecSize =
1 << (((MCID.TSFlags & NVPTXII::IsSuldMask) >> NVPTXII::IsSuldShift) - 1);
// For a surface load of vector size N, the Nth operand will be the surfref
if (OpNo == VecSize && MO.isImm()) {
lowerImageHandleSymbol(MO.getImm(), MCOp);
return true;
}
return false;
} else if (MCID.TSFlags & NVPTXII::IsSustFlag) {
// This is a surface store, so operand 0 is a surfref
if (OpNo == 0 && MO.isImm()) {
lowerImageHandleSymbol(MO.getImm(), MCOp);
return true;
}
return false;
} else if (MCID.TSFlags & NVPTXII::IsSurfTexQueryFlag) {
// This is a query, so operand 1 is a surfref/texref
if (OpNo == 1 && MO.isImm()) {
lowerImageHandleSymbol(MO.getImm(), MCOp);
return true;
}
return false;
}
return false;
}
void NVPTXAsmPrinter::lowerImageHandleSymbol(unsigned Index, MCOperand &MCOp) {
// Ewwww
TargetMachine &TM = const_cast<TargetMachine&>(MF->getTarget());
NVPTXTargetMachine &nvTM = static_cast<NVPTXTargetMachine&>(TM);
const NVPTXMachineFunctionInfo *MFI = MF->getInfo<NVPTXMachineFunctionInfo>();
const char *Sym = MFI->getImageHandleSymbol(Index);
std::string *SymNamePtr =
nvTM.getManagedStrPool()->getManagedString(Sym);
MCOp = GetSymbolRef(OutContext.GetOrCreateSymbol(
StringRef(SymNamePtr->c_str())));
}
void NVPTXAsmPrinter::lowerToMCInst(const MachineInstr *MI, MCInst &OutMI) {
OutMI.setOpcode(MI->getOpcode());
// Special: Do not mangle symbol operand of CALL_PROTOTYPE
if (MI->getOpcode() == NVPTX::CALL_PROTOTYPE) {
const MachineOperand &MO = MI->getOperand(0);
OutMI.addOperand(GetSymbolRef(
OutContext.GetOrCreateSymbol(Twine(MO.getSymbolName()))));
return;
}
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
MCOperand MCOp;
if (!nvptxSubtarget->hasImageHandles()) {
if (lowerImageHandleOperand(MI, i, MCOp)) {
OutMI.addOperand(MCOp);
continue;
}
}
if (lowerOperand(MO, MCOp))
OutMI.addOperand(MCOp);
}
}
bool NVPTXAsmPrinter::lowerOperand(const MachineOperand &MO,
MCOperand &MCOp) {
switch (MO.getType()) {
default: llvm_unreachable("unknown operand type");
case MachineOperand::MO_Register:
MCOp = MCOperand::CreateReg(encodeVirtualRegister(MO.getReg()));
break;
case MachineOperand::MO_Immediate:
MCOp = MCOperand::CreateImm(MO.getImm());
break;
case MachineOperand::MO_MachineBasicBlock:
MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
MO.getMBB()->getSymbol(), OutContext));
break;
case MachineOperand::MO_ExternalSymbol:
MCOp = GetSymbolRef(GetExternalSymbolSymbol(MO.getSymbolName()));
break;
case MachineOperand::MO_GlobalAddress:
MCOp = GetSymbolRef(getSymbol(MO.getGlobal()));
break;
case MachineOperand::MO_FPImmediate: {
const ConstantFP *Cnt = MO.getFPImm();
APFloat Val = Cnt->getValueAPF();
switch (Cnt->getType()->getTypeID()) {
default: report_fatal_error("Unsupported FP type"); break;
case Type::FloatTyID:
MCOp = MCOperand::CreateExpr(
NVPTXFloatMCExpr::CreateConstantFPSingle(Val, OutContext));
break;
case Type::DoubleTyID:
MCOp = MCOperand::CreateExpr(
NVPTXFloatMCExpr::CreateConstantFPDouble(Val, OutContext));
break;
}
break;
}
}
return true;
}
unsigned NVPTXAsmPrinter::encodeVirtualRegister(unsigned Reg) {
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
const TargetRegisterClass *RC = MRI->getRegClass(Reg);
DenseMap<unsigned, unsigned> &RegMap = VRegMapping[RC];
unsigned RegNum = RegMap[Reg];
// Encode the register class in the upper 4 bits
// Must be kept in sync with NVPTXInstPrinter::printRegName
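// Example: the Int32 vreg mapped to number 6 encodes as (3 << 28) | 6 ==
// 0x30000006, which the printer decodes back to a name like "%r6".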
unsigned Ret = 0;
if (RC == &NVPTX::Int1RegsRegClass) {
Ret = (1 << 28);
} else if (RC == &NVPTX::Int16RegsRegClass) {
Ret = (2 << 28);
} else if (RC == &NVPTX::Int32RegsRegClass) {
Ret = (3 << 28);
} else if (RC == &NVPTX::Int64RegsRegClass) {
Ret = (4 << 28);
} else if (RC == &NVPTX::Float32RegsRegClass) {
Ret = (5 << 28);
} else if (RC == &NVPTX::Float64RegsRegClass) {
Ret = (6 << 28);
} else {
report_fatal_error("Bad register class");
}
// Insert the vreg number
Ret |= (RegNum & 0x0FFFFFFF);
return Ret;
} else {
// Some special-use registers are actually physical registers.
// Encode this as the register class ID of 0 and the real register ID.
return Reg & 0x0FFFFFFF;
}
}
MCOperand NVPTXAsmPrinter::GetSymbolRef(const MCSymbol *Symbol) {
const MCExpr *Expr;
Expr = MCSymbolRefExpr::Create(Symbol, MCSymbolRefExpr::VK_None,
OutContext);
return MCOperand::CreateExpr(Expr);
}
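// Illustrative ABI outputs of printReturnValStr: an i32 return becomes
// " (.param .b32 func_retval0) "; an i8 return is widened to .b32; an 8-byte
// struct with align 8 becomes " (.param .align 8 .b8 func_retval0[8]) ".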
void NVPTXAsmPrinter::printReturnValStr(const Function *F, raw_ostream &O) {
const DataLayout *TD = TM.getDataLayout();
const TargetLowering *TLI = nvptxSubtarget->getTargetLowering();
Type *Ty = F->getReturnType();
bool isABI = (nvptxSubtarget->getSmVersion() >= 20);
if (Ty->getTypeID() == Type::VoidTyID)
return;
O << " (";
if (isABI) {
if (Ty->isFloatingPointTy() || Ty->isIntegerTy()) {
unsigned size = 0;
if (const IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
size = ITy->getBitWidth();
if (size < 32)
size = 32;
} else {
assert(Ty->isFloatingPointTy() && "Floating point type expected here");
size = Ty->getPrimitiveSizeInBits();
}
O << ".param .b" << size << " func_retval0";
} else if (isa<PointerType>(Ty)) {
O << ".param .b" << TLI->getPointerTy().getSizeInBits()
<< " func_retval0";
} else if ((Ty->getTypeID() == Type::StructTyID) || isa<VectorType>(Ty)) {
unsigned totalsz = TD->getTypeAllocSize(Ty);
unsigned retAlignment = 0;
if (!llvm::getAlign(*F, 0, retAlignment))
retAlignment = TD->getABITypeAlignment(Ty);
O << ".param .align " << retAlignment << " .b8 func_retval0[" << totalsz
<< "]";
} else
llvm_unreachable("Unknown return type");
} else {
SmallVector<EVT, 16> vtparts;
ComputeValueVTs(*TLI, Ty, vtparts);
unsigned idx = 0;
for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
unsigned elems = 1;
EVT elemtype = vtparts[i];
if (vtparts[i].isVector()) {
elems = vtparts[i].getVectorNumElements();
elemtype = vtparts[i].getVectorElementType();
}
for (unsigned j = 0, je = elems; j != je; ++j) {
unsigned sz = elemtype.getSizeInBits();
if (elemtype.isInteger() && (sz < 32))
sz = 32;
O << ".reg .b" << sz << " func_retval" << idx;
if (j < je - 1)
O << ", ";
++idx;
}
if (i < e - 1)
O << ", ";
}
}
O << ") ";
return;
}
void NVPTXAsmPrinter::printReturnValStr(const MachineFunction &MF,
raw_ostream &O) {
const Function *F = MF.getFunction();
printReturnValStr(F, O);
}
// Return true if MBB is the header of a loop marked with
// llvm.loop.unroll.disable.
// TODO: consider "#pragma unroll 1" which is equivalent to "#pragma nounroll".
bool NVPTXAsmPrinter::isLoopHeaderOfNoUnroll(
const MachineBasicBlock &MBB) const {
MachineLoopInfo &LI = getAnalysis<MachineLoopInfo>();
// TODO: isLoopHeader() should take "const MachineBasicBlock *".
// We insert .pragma "nounroll" only to the loop header.
if (!LI.isLoopHeader(const_cast<MachineBasicBlock *>(&MBB)))
return false;
// llvm.loop.unroll.disable is marked on the back edges of a loop. Therefore,
// we iterate through each back edge of the loop with header MBB, and check
// whether its metadata contains llvm.loop.unroll.disable.
for (auto I = MBB.pred_begin(); I != MBB.pred_end(); ++I) {
const MachineBasicBlock *PMBB = *I;
if (LI.getLoopFor(PMBB) != LI.getLoopFor(&MBB)) {
// Edges from other loops to MBB are not back edges.
continue;
}
if (const BasicBlock *PBB = PMBB->getBasicBlock()) {
if (MDNode *LoopID = PBB->getTerminator()->getMetadata("llvm.loop")) {
if (GetUnrollMetadata(LoopID, "llvm.loop.unroll.disable"))
return true;
}
}
}
return false;
}
void NVPTXAsmPrinter::EmitBasicBlockStart(const MachineBasicBlock &MBB) const {
AsmPrinter::EmitBasicBlockStart(MBB);
if (isLoopHeaderOfNoUnroll(MBB))
OutStreamer.EmitRawText(StringRef("\t.pragma \"nounroll\";\n"));
}
void NVPTXAsmPrinter::EmitFunctionEntryLabel() {
SmallString<128> Str;
raw_svector_ostream O(Str);
if (!GlobalsEmitted) {
emitGlobals(*MF->getFunction()->getParent());
GlobalsEmitted = true;
}
// Set up
MRI = &MF->getRegInfo();
F = MF->getFunction();
emitLinkageDirective(F, O);
if (llvm::isKernelFunction(*F))
O << ".entry ";
else {
O << ".func ";
printReturnValStr(*MF, O);
}
O << *CurrentFnSym;
emitFunctionParamList(*MF, O);
if (llvm::isKernelFunction(*F))
emitKernelFunctionDirectives(*F, O);
OutStreamer.EmitRawText(O.str());
prevDebugLoc = DebugLoc();
}
void NVPTXAsmPrinter::EmitFunctionBodyStart() {
VRegMapping.clear();
OutStreamer.EmitRawText(StringRef("{\n"));
setAndEmitFunctionVirtualRegisters(*MF);
SmallString<128> Str;
raw_svector_ostream O(Str);
emitDemotedVars(MF->getFunction(), O);
OutStreamer.EmitRawText(O.str());
}
void NVPTXAsmPrinter::EmitFunctionBodyEnd() {
OutStreamer.EmitRawText(StringRef("}\n"));
VRegMapping.clear();
}
void NVPTXAsmPrinter::emitImplicitDef(const MachineInstr *MI) const {
unsigned RegNo = MI->getOperand(0).getReg();
if (TargetRegisterInfo::isVirtualRegister(RegNo)) {
OutStreamer.AddComment(Twine("implicit-def: ") +
getVirtualRegisterName(RegNo));
} else {
OutStreamer.AddComment(Twine("implicit-def: ") +
nvptxSubtarget->getRegisterInfo()->getName(RegNo));
}
OutStreamer.AddBlankLine();
}
void NVPTXAsmPrinter::emitKernelFunctionDirectives(const Function &F,
raw_ostream &O) const {
// If any of reqntid* is specified in the NVVM IR, then output the reqntid
// directive and set the unspecified ones to 1. If none of reqntid* is
// specified, don't output the reqntid directive.
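// E.g. reqntidx=16 and reqntidy=8 with z unspecified yields
// ".reqntid 16, 8, 1".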
unsigned reqntidx, reqntidy, reqntidz;
bool specified = false;
if (!llvm::getReqNTIDx(F, reqntidx))
reqntidx = 1;
else
specified = true;
if (!llvm::getReqNTIDy(F, reqntidy))
reqntidy = 1;
else
specified = true;
if (!llvm::getReqNTIDz(F, reqntidz))
reqntidz = 1;
else
specified = true;
if (specified)
O << ".reqntid " << reqntidx << ", " << reqntidy << ", " << reqntidz
<< "\n";
// If any of maxntid* is specified in the NVVM IR, then output the maxntid
// directive and set the unspecified ones to 1. If none of maxntid* is
// specified, don't output the maxntid directive.
unsigned maxntidx, maxntidy, maxntidz;
specified = false;
if (!llvm::getMaxNTIDx(F, maxntidx))
maxntidx = 1;
else
specified = true;
if (!llvm::getMaxNTIDy(F, maxntidy))
maxntidy = 1;
else
specified = true;
if (!llvm::getMaxNTIDz(F, maxntidz))
maxntidz = 1;
else
specified = true;
if (specified)
O << ".maxntid " << maxntidx << ", " << maxntidy << ", " << maxntidz
<< "\n";
unsigned mincta;
if (llvm::getMinCTASm(F, mincta))
O << ".minnctapersm " << mincta << "\n";
}
std::string
NVPTXAsmPrinter::getVirtualRegisterName(unsigned Reg) const {
const TargetRegisterClass *RC = MRI->getRegClass(Reg);
std::string Name;
raw_string_ostream NameStr(Name);
VRegRCMap::const_iterator I = VRegMapping.find(RC);
assert(I != VRegMapping.end() && "Bad register class");
const DenseMap<unsigned, unsigned> &RegMap = I->second;
VRegMap::const_iterator VI = RegMap.find(Reg);
assert(VI != RegMap.end() && "Bad virtual register");
unsigned MappedVR = VI->second;
NameStr << getNVPTXRegClassStr(RC) << MappedVR;
NameStr.flush();
return Name;
}
void NVPTXAsmPrinter::emitVirtualRegister(unsigned int vr,
raw_ostream &O) {
O << getVirtualRegisterName(vr);
}
void NVPTXAsmPrinter::printVecModifiedImmediate(
const MachineOperand &MO, const char *Modifier, raw_ostream &O) {
static const char vecelem[] = { '0', '1', '2', '3', '0', '1', '2', '3' };
int Imm = (int) MO.getImm();
if (0 == strcmp(Modifier, "vecelem"))
O << "_" << vecelem[Imm];
else if (0 == strcmp(Modifier, "vecv4comm1")) {
if ((Imm < 0) || (Imm > 3))
O << "//";
} else if (0 == strcmp(Modifier, "vecv4comm2")) {
if ((Imm < 4) || (Imm > 7))
O << "//";
} else if (0 == strcmp(Modifier, "vecv4pos")) {
if (Imm < 0)
Imm = 0;
O << "_" << vecelem[Imm % 4];
} else if (0 == strcmp(Modifier, "vecv2comm1")) {
if ((Imm < 0) || (Imm > 1))
O << "//";
} else if (0 == strcmp(Modifier, "vecv2comm2")) {
if ((Imm < 2) || (Imm > 3))
O << "//";
} else if (0 == strcmp(Modifier, "vecv2pos")) {
if (Imm < 0)
Imm = 0;
O << "_" << vecelem[Imm % 2];
} else
llvm_unreachable("Unknown Modifier on immediate operand");
}
void NVPTXAsmPrinter::emitDeclaration(const Function *F, raw_ostream &O) {
emitLinkageDirective(F, O);
if (llvm::isKernelFunction(*F))
O << ".entry ";
else
O << ".func ";
printReturnValStr(F, O);
O << *getSymbol(F) << "\n";
emitFunctionParamList(F, O);
O << ";\n";
}
static bool usedInGlobalVarDef(const Constant *C) {
if (!C)
return false;
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) {
if (GV->getName() == "llvm.used")
return false;
return true;
}
for (const User *U : C->users())
if (const Constant *C = dyn_cast<Constant>(U))
if (usedInGlobalVarDef(C))
return true;
return false;
}
static bool usedInOneFunc(const User *U, Function const *&oneFunc) {
if (const GlobalVariable *othergv = dyn_cast<GlobalVariable>(U)) {
if (othergv->getName() == "llvm.used")
return true;
}
if (const Instruction *instr = dyn_cast<Instruction>(U)) {
if (instr->getParent() && instr->getParent()->getParent()) {
const Function *curFunc = instr->getParent()->getParent();
if (oneFunc && (curFunc != oneFunc))
return false;
oneFunc = curFunc;
return true;
} else
return false;
}
for (const User *UU : U->users())
if (!usedInOneFunc(UU, oneFunc))
return false;
return true;
}
/* Find out if a global variable can be demoted to local scope.
* Currently, this is valid for CUDA shared variables, which have local
 * scope and global lifetime. So the conditions to check are:
* 1. Is the global variable in shared address space?
* 2. Does it have internal linkage?
* 3. Is the global variable referenced only in one function?
*/
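// Illustrative candidate: "@smem = internal addrspace(3) global [64 x float]
// zeroinitializer" that is referenced from exactly one kernel can be demoted
// into that function (printed by emitDemotedVars).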
static bool canDemoteGlobalVar(const GlobalVariable *gv, Function const *&f) {
if (!gv->hasInternalLinkage())
return false;
const PointerType *Pty = gv->getType();
if (Pty->getAddressSpace() != llvm::ADDRESS_SPACE_SHARED)
return false;
const Function *oneFunc = nullptr;
bool flag = usedInOneFunc(gv, oneFunc);
if (!flag)
return false;
if (!oneFunc)
return false;
f = oneFunc;
return true;
}
static bool useFuncSeen(const Constant *C,
llvm::DenseMap<const Function *, bool> &seenMap) {
for (const User *U : C->users()) {
if (const Constant *cu = dyn_cast<Constant>(U)) {
if (useFuncSeen(cu, seenMap))
return true;
} else if (const Instruction *I = dyn_cast<Instruction>(U)) {
const BasicBlock *bb = I->getParent();
if (!bb)
continue;
const Function *caller = bb->getParent();
if (!caller)
continue;
if (seenMap.find(caller) != seenMap.end())
return true;
}
}
return false;
}
void NVPTXAsmPrinter::emitDeclarations(const Module &M, raw_ostream &O) {
llvm::DenseMap<const Function *, bool> seenMap;
for (Module::const_iterator FI = M.begin(), FE = M.end(); FI != FE; ++FI) {
const Function *F = FI;
if (F->isDeclaration()) {
if (F->use_empty())
continue;
if (F->getIntrinsicID())
continue;
emitDeclaration(F, O);
continue;
}
for (const User *U : F->users()) {
if (const Constant *C = dyn_cast<Constant>(U)) {
if (usedInGlobalVarDef(C)) {
// The use is in the initialization of a global variable
// that is a function pointer, so print a declaration
// for the original function
emitDeclaration(F, O);
break;
}
// Emit a declaration of this function if the function that
// uses this constant expr has already been seen.
if (useFuncSeen(C, seenMap)) {
emitDeclaration(F, O);
break;
}
}
if (!isa<Instruction>(U))
continue;
const Instruction *instr = cast<Instruction>(U);
const BasicBlock *bb = instr->getParent();
if (!bb)
continue;
const Function *caller = bb->getParent();
if (!caller)
continue;
// If a caller has already been seen, then the caller appears in the module
// before the callee, so print out a declaration for the callee.
if (seenMap.find(caller) != seenMap.end()) {
emitDeclaration(F, O);
break;
}
}
seenMap[F] = true;
}
}
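// Assigns an index to each source file seen in the debug info and emits a
// file directive for it, e.g. '.file 1 "kernel.cu"' (name assumed for
// illustration).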
void NVPTXAsmPrinter::recordAndEmitFilenames(Module &M) {
DebugInfoFinder DbgFinder;
DbgFinder.processModule(M);
unsigned i = 1;
for (DICompileUnit DIUnit : DbgFinder.compile_units()) {
StringRef Filename(DIUnit.getFilename());
StringRef Dirname(DIUnit.getDirectory());
SmallString<128> FullPathName = Dirname;
if (!Dirname.empty() && !sys::path::is_absolute(Filename)) {
sys::path::append(FullPathName, Filename);
Filename = FullPathName;
}
if (filenameMap.find(Filename) != filenameMap.end())
continue;
filenameMap[Filename] = i;
OutStreamer.EmitDwarfFileDirective(i, "", Filename);
++i;
}
for (DISubprogram SP : DbgFinder.subprograms()) {
StringRef Filename(SP.getFilename());
StringRef Dirname(SP.getDirectory());
SmallString<128> FullPathName = Dirname;
if (!Dirname.empty() && !sys::path::is_absolute(Filename)) {
sys::path::append(FullPathName, Filename);
Filename = FullPathName;
}
if (filenameMap.find(Filename) != filenameMap.end())
continue;
filenameMap[Filename] = i;
++i;
}
}
bool NVPTXAsmPrinter::doInitialization(Module &M) {
// Construct a default subtarget off of the TargetMachine defaults. The
// rest of NVPTX isn't friendly to change subtargets per function and
// so the default TargetMachine will have all of the options.
StringRef TT = TM.getTargetTriple();
StringRef CPU = TM.getTargetCPU();
StringRef FS = TM.getTargetFeatureString();
const NVPTXTargetMachine &NTM = static_cast<const NVPTXTargetMachine &>(TM);
const NVPTXSubtarget STI(TT, CPU, FS, NTM);
SmallString<128> Str1;
raw_svector_ostream OS1(Str1);
MMI = getAnalysisIfAvailable<MachineModuleInfo>();
MMI->AnalyzeModule(M);
// We need to call the parent's one explicitly.
//bool Result = AsmPrinter::doInitialization(M);
// Initialize TargetLoweringObjectFile.
const_cast<TargetLoweringObjectFile &>(getObjFileLowering())
.Initialize(OutContext, TM);
Mang = new Mangler(TM.getDataLayout());
// Emit header before any dwarf directives are emitted below.
emitHeader(M, OS1, STI);
OutStreamer.EmitRawText(OS1.str());
// Already commented out
//bool Result = AsmPrinter::doInitialization(M);
// Emit module-level inline asm if it exists.
if (!M.getModuleInlineAsm().empty()) {
OutStreamer.AddComment("Start of file scope inline assembly");
OutStreamer.AddBlankLine();
OutStreamer.EmitRawText(StringRef(M.getModuleInlineAsm()));
OutStreamer.AddBlankLine();
OutStreamer.AddComment("End of file scope inline assembly");
OutStreamer.AddBlankLine();
}
// If we're not NVCL we're CUDA, go ahead and emit filenames.
if (Triple(TM.getTargetTriple()).getOS() != Triple::NVCL)
recordAndEmitFilenames(M);
GlobalsEmitted = false;
return false; // success
}
void NVPTXAsmPrinter::emitGlobals(const Module &M) {
SmallString<128> Str2;
raw_svector_ostream OS2(Str2);
emitDeclarations(M, OS2);
// As ptxas does not support forward references of globals, we need to first
// sort the list of module-level globals in def-use order. We visit each
// global variable in order, and ensure that we emit it *after* its dependent
// globals. We use a little extra memory maintaining both a set and a list to
// have fast searches while maintaining a strict ordering.
SmallVector<const GlobalVariable *, 8> Globals;
DenseSet<const GlobalVariable *> GVVisited;
DenseSet<const GlobalVariable *> GVVisiting;
// Visit each global variable, in order
for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
I != E; ++I)
VisitGlobalVariableForEmission(I, Globals, GVVisited, GVVisiting);
assert(GVVisited.size() == M.getGlobalList().size() &&
"Missed a global variable");
assert(GVVisiting.size() == 0 && "Did not fully process a global variable");
// Print out module-level global variables in proper order
for (unsigned i = 0, e = Globals.size(); i != e; ++i)
printModuleLevelGV(Globals[i], OS2);
OS2 << '\n';
OutStreamer.EmitRawText(OS2.str());
}
void NVPTXAsmPrinter::emitHeader(Module &M, raw_ostream &O,
const NVPTXSubtarget &STI) {
O << "//\n";
O << "// Generated by LLVM NVPTX Back-End\n";
O << "//\n";
O << "\n";
unsigned PTXVersion = STI.getPTXVersion();
O << ".version " << (PTXVersion / 10) << "." << (PTXVersion % 10) << "\n";
O << ".target ";
O << STI.getTargetName();
const NVPTXTargetMachine &NTM = static_cast<const NVPTXTargetMachine &>(TM);
if (NTM.getDrvInterface() == NVPTX::NVCL)
O << ", texmode_independent";
else {
if (!STI.hasDouble())
O << ", map_f64_to_f32";
}
if (MAI->doesSupportDebugInformation())
O << ", debug";
O << "\n";
O << ".address_size ";
if (NTM.is64Bit())
O << "64";
else
O << "32";
O << "\n";
O << "\n";
}
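// A typical emitted header (illustrative; PTXVersion 32, sm_20, 64-bit CUDA):
//   .version 3.2
//   .target sm_20
//   .address_size 64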
bool NVPTXAsmPrinter::doFinalization(Module &M) {
// If we did not emit any functions, then the global declarations have not
// yet been emitted.
if (!GlobalsEmitted) {
emitGlobals(M);
GlobalsEmitted = true;
}
// XXX Temporarily remove global variables so that doFinalization() will not
// emit them again (global variables are emitted at the beginning).
Module::GlobalListType &global_list = M.getGlobalList();
int i, n = global_list.size();
GlobalVariable **gv_array = new GlobalVariable *[n];
// first, back-up GlobalVariable in gv_array
i = 0;
for (Module::global_iterator I = global_list.begin(), E = global_list.end();
I != E; ++I)
gv_array[i++] = &*I;
// second, empty global_list
while (!global_list.empty())
global_list.remove(global_list.begin());
// call doFinalization
bool ret = AsmPrinter::doFinalization(M);
// now we restore global variables
for (i = 0; i < n; i++)
global_list.insert(global_list.end(), gv_array[i]);
clearAnnotationCache(&M);
delete[] gv_array;
return ret;
//bool Result = AsmPrinter::doFinalization(M);
// Instead of calling the parents doFinalization, we may
// clone parents doFinalization and customize here.
// Currently, we would have to "#if NVISA"-out the EmitGlobals() in the
// parent's doFinalization, which is too intrusive.
//
// Same for the doInitialization.
//return Result;
}
// This function emits appropriate linkage directives for
// functions and global variables.
//
// extern function declaration -> .extern
// extern function definition -> .visible
// external global variable with init -> .visible
// external without init -> .extern
// appending -> not allowed, assert.
// for any linkage other than
// internal, private, linker_private,
// linker_private_weak, linker_private_weak_def_auto,
// we emit -> .weak.
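// E.g. (hypothetical function "foo" under CUDA): an extern definition prints
// ".visible .func foo(...)" while a bare declaration prints
// ".extern .func foo(...)".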
void NVPTXAsmPrinter::emitLinkageDirective(const GlobalValue *V,
raw_ostream &O) {
if (static_cast<NVPTXTargetMachine &>(TM).getDrvInterface() == NVPTX::CUDA) {
if (V->hasExternalLinkage()) {
if (isa<GlobalVariable>(V)) {
const GlobalVariable *GVar = cast<GlobalVariable>(V);
if (GVar) {
if (GVar->hasInitializer())
O << ".visible ";
else
O << ".extern ";
}
} else if (V->isDeclaration())
O << ".extern ";
else
O << ".visible ";
} else if (V->hasAppendingLinkage()) {
std::string msg;
msg.append("Error: ");
msg.append("Symbol ");
if (V->hasName())
msg.append(V->getName());
msg.append("has unsupported appending linkage type");
llvm_unreachable(msg.c_str());
} else if (!V->hasInternalLinkage() &&
!V->hasPrivateLinkage()) {
O << ".weak ";
}
}
}
void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
raw_ostream &O,
bool processDemoted) {
// Skip metadata
if (GVar->hasSection()) {
if (GVar->getSection() == StringRef("llvm.metadata"))
return;
}
// Skip LLVM intrinsic global variables
if (GVar->getName().startswith("llvm.") ||
GVar->getName().startswith("nvvm."))
return;
const DataLayout *TD = TM.getDataLayout();
// GlobalVariables are always constant pointers themselves.
const PointerType *PTy = GVar->getType();
Type *ETy = PTy->getElementType();
if (GVar->hasExternalLinkage()) {
if (GVar->hasInitializer())
O << ".visible ";
else
O << ".extern ";
} else if (GVar->hasLinkOnceLinkage() || GVar->hasWeakLinkage() ||
GVar->hasAvailableExternallyLinkage() ||
GVar->hasCommonLinkage()) {
O << ".weak ";
}
if (llvm::isTexture(*GVar)) {
O << ".global .texref " << llvm::getTextureName(*GVar) << ";\n";
return;
}
if (llvm::isSurface(*GVar)) {
O << ".global .surfref " << llvm::getSurfaceName(*GVar) << ";\n";
return;
}
if (GVar->isDeclaration()) {
// (extern) declarations, no definition or initializer
// Currently the only known declaration is for an automatic __local
// (.shared) promoted to global.
emitPTXGlobalVariable(GVar, O);
O << ";\n";
return;
}
if (llvm::isSampler(*GVar)) {
O << ".global .samplerref " << llvm::getSamplerName(*GVar);
const Constant *Initializer = nullptr;
if (GVar->hasInitializer())
Initializer = GVar->getInitializer();
const ConstantInt *CI = nullptr;
if (Initializer)
CI = dyn_cast<ConstantInt>(Initializer);
if (CI) {
unsigned sample = CI->getZExtValue();
O << " = { ";
for (int i = 0,
addr = ((sample & __CLK_ADDRESS_MASK) >> __CLK_ADDRESS_BASE);
i < 3; i++) {
O << "addr_mode_" << i << " = ";
switch (addr) {
case 0:
O << "wrap";
break;
case 1:
O << "clamp_to_border";
break;
case 2:
O << "clamp_to_edge";
break;
case 3:
O << "wrap";
break;
case 4:
O << "mirror";
break;
}
O << ", ";
}
O << "filter_mode = ";
switch ((sample & __CLK_FILTER_MASK) >> __CLK_FILTER_BASE) {
case 0:
O << "nearest";
break;
case 1:
O << "linear";
break;
case 2:
llvm_unreachable("Anisotropic filtering is not supported");
default:
O << "nearest";
break;
}
if (!((sample & __CLK_NORMALIZED_MASK) >> __CLK_NORMALIZED_BASE)) {
O << ", force_unnormalized_coords = 1";
}
O << " }";
}
O << ";\n";
return;
}
if (GVar->hasPrivateLinkage()) {
if (!strncmp(GVar->getName().data(), "unrollpragma", 12))
return;
// FIXME - need better way (e.g. Metadata) to avoid generating this global
if (!strncmp(GVar->getName().data(), "filename", 8))
return;
if (GVar->use_empty())
return;
}
const Function *demotedFunc = nullptr;
if (!processDemoted && canDemoteGlobalVar(GVar, demotedFunc)) {
O << "// " << GVar->getName() << " has been demoted\n";
if (localDecls.find(demotedFunc) != localDecls.end())
localDecls[demotedFunc].push_back(GVar);
else {
std::vector<const GlobalVariable *> temp;
temp.push_back(GVar);
localDecls[demotedFunc] = temp;
}
return;
}
O << ".";
emitPTXAddressSpace(PTy->getAddressSpace(), O);
if (isManaged(*GVar)) {
O << " .attribute(.managed)";
}
if (GVar->getAlignment() == 0)
O << " .align " << (int) TD->getPrefTypeAlignment(ETy);
else
O << " .align " << GVar->getAlignment();
if (ETy->isFloatingPointTy() || ETy->isIntegerTy() || ETy->isPointerTy()) {
O << " .";
// Special case: ABI requires that we use .u8 for predicates
if (ETy->isIntegerTy(1))
O << "u8";
else
O << getPTXFundamentalTypeStr(ETy, false);
O << " ";
O << *getSymbol(GVar);
// PTX allows variable initialization only for the constant and global state
// spaces.
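// E.g. (illustrative) "@g = addrspace(1) global i32 42, align 4" prints
// ".visible .global .align 4 .u32 g = 42;".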
if (GVar->hasInitializer()) {
if ((PTy->getAddressSpace() == llvm::ADDRESS_SPACE_GLOBAL) ||
(PTy->getAddressSpace() == llvm::ADDRESS_SPACE_CONST)) {
const Constant *Initializer = GVar->getInitializer();
// 'undef' is treated as if no value was specified.
if (!Initializer->isNullValue() && !isa<UndefValue>(Initializer)) {
O << " = ";
printScalarConstant(Initializer, O);
}
} else {
// The frontend adds zero-initializer to variables that don't have an
// initial value, so skip warning for this case.
if (!GVar->getInitializer()->isNullValue()) {
std::string warnMsg =
("initial value of '" + GVar->getName() +
"' is not allowed in addrspace(" +
Twine(llvm::utostr_32(PTy->getAddressSpace())) + ")").str();
report_fatal_error(warnMsg.c_str());
}
}
}
} else {
unsigned int ElementSize = 0;
// Although PTX has direct support for struct and array types, and LLVM IR
// is very similar to PTX in this respect, LLVM CodeGen does not preserve
// these high-level field accesses. Structs, arrays
// and vectors are lowered into arrays of bytes.
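// E.g. (illustrative) "[2 x i32] [i32 1, i32 2]" in addrspace(1), with no
// symbols in its initializer, prints roughly
// ".visible .global .align 4 .b8 arr[8] = {1, 0, 0, 0, 2, 0, 0, 0};".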
switch (ETy->getTypeID()) {
case Type::StructTyID:
case Type::ArrayTyID:
case Type::VectorTyID:
ElementSize = TD->getTypeStoreSize(ETy);
// PTX allows variable initialization only for the constant and
// global state spaces.
if (((PTy->getAddressSpace() == llvm::ADDRESS_SPACE_GLOBAL) ||
(PTy->getAddressSpace() == llvm::ADDRESS_SPACE_CONST)) &&
GVar->hasInitializer()) {
const Constant *Initializer = GVar->getInitializer();
if (!isa<UndefValue>(Initializer) && !Initializer->isNullValue()) {
AggBuffer aggBuffer(ElementSize, O, *this);
bufferAggregateConstant(Initializer, &aggBuffer);
if (aggBuffer.numSymbols) {
if (static_cast<const NVPTXTargetMachine &>(TM).is64Bit()) {
O << " .u64 " << *getSymbol(GVar) << "[";
O << ElementSize / 8;
} else {
O << " .u32 " << *getSymbol(GVar) << "[";
O << ElementSize / 4;
}
O << "]";
} else {
O << " .b8 " << *getSymbol(GVar) << "[";
O << ElementSize;
O << "]";
}
O << " = {";
aggBuffer.print();
O << "}";
} else {
O << " .b8 " << *getSymbol(GVar);
if (ElementSize) {
O << "[";
O << ElementSize;
O << "]";
}
}
} else {
O << " .b8 " << *getSymbol(GVar);
if (ElementSize) {
O << "[";
O << ElementSize;
O << "]";
}
}
break;
default:
llvm_unreachable("type not supported yet");
}
}
O << ";\n";
}
void NVPTXAsmPrinter::emitDemotedVars(const Function *f, raw_ostream &O) {
if (localDecls.find(f) == localDecls.end())
return;
std::vector<const GlobalVariable *> &gvars = localDecls[f];
for (unsigned i = 0, e = gvars.size(); i != e; ++i) {
O << "\t// demoted variable\n\t";
printModuleLevelGV(gvars[i], O, true);
}
}
void NVPTXAsmPrinter::emitPTXAddressSpace(unsigned int AddressSpace,
raw_ostream &O) const {
switch (AddressSpace) {
case llvm::ADDRESS_SPACE_LOCAL:
O << "local";
break;
case llvm::ADDRESS_SPACE_GLOBAL:
O << "global";
break;
case llvm::ADDRESS_SPACE_CONST:
O << "const";
break;
case llvm::ADDRESS_SPACE_SHARED:
O << "shared";
break;
default:
report_fatal_error("Bad address space found while emitting PTX");
break;
}
}
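// Maps a scalar LLVM type to its PTX type string, e.g. i1 -> "pred",
// i32 -> "u32", float -> "f32", double -> "f64", and pointers to
// "u64"/"b64" (64-bit) or "u32"/"b32" (32-bit) depending on useB4PTR.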
std::string
NVPTXAsmPrinter::getPTXFundamentalTypeStr(const Type *Ty, bool useB4PTR) const {
switch (Ty->getTypeID()) {
default:
llvm_unreachable("unexpected type");
break;
case Type::IntegerTyID: {
unsigned NumBits = cast<IntegerType>(Ty)->getBitWidth();
if (NumBits == 1)
return "pred";
else if (NumBits <= 64) {
std::string name = "u";
return name + utostr(NumBits);
} else {
llvm_unreachable("Integer too large");
break;
}
break;
}
case Type::FloatTyID:
return "f32";
case Type::DoubleTyID:
return "f64";
case Type::PointerTyID:
if (static_cast<const NVPTXTargetMachine &>(TM).is64Bit())
if (useB4PTR)
return "b64";
else
return "u64";
else if (useB4PTR)
return "b32";
else
return "u32";
}
llvm_unreachable("unexpected type");
return nullptr;
}
void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable *GVar,
raw_ostream &O) {
const DataLayout *TD = TM.getDataLayout();
// GlobalVariables are always constant pointers themselves.
const PointerType *PTy = GVar->getType();
Type *ETy = PTy->getElementType();
O << ".";
emitPTXAddressSpace(PTy->getAddressSpace(), O);
if (GVar->getAlignment() == 0)
O << " .align " << (int) TD->getPrefTypeAlignment(ETy);
else
O << " .align " << GVar->getAlignment();
if (ETy->isFloatingPointTy() || ETy->isIntegerTy() || ETy->isPointerTy()) {
O << " .";
O << getPTXFundamentalTypeStr(ETy);
O << " ";
O << *getSymbol(GVar);
return;
}
int64_t ElementSize = 0;
// Although PTX has direct support for struct and array types, and LLVM IR
// is very similar to PTX in this respect, LLVM CodeGen does not preserve
// these high-level field accesses. Structs and arrays are lowered
// into arrays of bytes.
switch (ETy->getTypeID()) {
case Type::StructTyID:
case Type::ArrayTyID:
case Type::VectorTyID:
ElementSize = TD->getTypeStoreSize(ETy);
O << " .b8 " << *getSymbol(GVar) << "[";
if (ElementSize) {
O << itostr(ElementSize);
}
O << "]";
break;
default:
llvm_unreachable("type not supported yet");
}
return;
}
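// E.g. for "[8 x float]" this returns float's preferred alignment; for a
// struct it returns the largest alignment among its members.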
static unsigned int getOpenCLAlignment(const DataLayout *TD, Type *Ty) {
if (Ty->isSingleValueType())
return TD->getPrefTypeAlignment(Ty);
const ArrayType *ATy = dyn_cast<ArrayType>(Ty);
if (ATy)
return getOpenCLAlignment(TD, ATy->getElementType());
const StructType *STy = dyn_cast<StructType>(Ty);
if (STy) {
unsigned int alignStruct = 1;
// Go through each element of the struct and find the
// largest alignment.
for (unsigned i = 0, e = STy->getNumElements(); i != e; i++) {
Type *ETy = STy->getElementType(i);
unsigned int align = getOpenCLAlignment(TD, ETy);
if (align > alignStruct)
alignStruct = align;
}
return alignStruct;
}
const FunctionType *FTy = dyn_cast<FunctionType>(Ty);
if (FTy)
return TD->getPointerPrefAlignment();
return TD->getPrefTypeAlignment(Ty);
}
void NVPTXAsmPrinter::printParamName(Function::const_arg_iterator I,
int paramIndex, raw_ostream &O) {
O << *getSymbol(I->getParent()) << "_param_" << paramIndex;
}
void NVPTXAsmPrinter::printParamName(int paramIndex, raw_ostream &O) {
O << *CurrentFnSym << "_param_" << paramIndex;
}
void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, raw_ostream &O) {
const DataLayout *TD = TM.getDataLayout();
const AttributeSet &PAL = F->getAttributes();
const TargetLowering *TLI = nvptxSubtarget->getTargetLowering();
Function::const_arg_iterator I, E;
unsigned paramIndex = 0;
bool first = true;
bool isKernelFunc = llvm::isKernelFunction(*F);
bool isABI = (nvptxSubtarget->getSmVersion() >= 20);
MVT thePointerTy = TLI->getPointerTy();
O << "(\n";
for (I = F->arg_begin(), E = F->arg_end(); I != E; ++I, paramIndex++) {
Type *Ty = I->getType();
if (!first)
O << ",\n";
first = false;
// Handle image/sampler parameters
if (isKernelFunction(*F)) {
if (isSampler(*I) || isImage(*I)) {
if (isImage(*I)) {
std::string sname = I->getName();
if (isImageWriteOnly(*I) || isImageReadWrite(*I)) {
if (nvptxSubtarget->hasImageHandles())
O << "\t.param .u64 .ptr .surfref ";
else
O << "\t.param .surfref ";
O << *CurrentFnSym << "_param_" << paramIndex;
}
else { // Default image is read_only
if (nvptxSubtarget->hasImageHandles())
O << "\t.param .u64 .ptr .texref ";
else
O << "\t.param .texref ";
O << *CurrentFnSym << "_param_" << paramIndex;
}
} else {
if (nvptxSubtarget->hasImageHandles())
O << "\t.param .u64 .ptr .samplerref ";
else
O << "\t.param .samplerref ";
O << *CurrentFnSym << "_param_" << paramIndex;
}
continue;
}
}
if (!PAL.hasAttribute(paramIndex + 1, Attribute::ByVal)) {
if (Ty->isAggregateType() || Ty->isVectorTy()) {
// Just print .param .align <a> .b8 .param[size];
// <a> = PAL.getparamalignment
// size = typeallocsize of element type
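// E.g. (hypothetical kernel "foo") a 12-byte vector argument with align 4
// prints "\t.param .align 4 .b8 foo_param_0[12]".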
unsigned align = PAL.getParamAlignment(paramIndex + 1);
if (align == 0)
align = TD->getABITypeAlignment(Ty);
unsigned sz = TD->getTypeAllocSize(Ty);
O << "\t.param .align " << align << " .b8 ";
printParamName(I, paramIndex, O);
O << "[" << sz << "]";
continue;
}
// Just a scalar
const PointerType *PTy = dyn_cast<PointerType>(Ty);
if (isKernelFunc) {
if (PTy) {
// Special handling for pointer arguments to kernel
O << "\t.param .u" << thePointerTy.getSizeInBits() << " ";
if (static_cast<NVPTXTargetMachine &>(TM).getDrvInterface() !=
NVPTX::CUDA) {
Type *ETy = PTy->getElementType();
int addrSpace = PTy->getAddressSpace();
switch (addrSpace) {
default:
O << ".ptr ";
break;
case llvm::ADDRESS_SPACE_CONST:
O << ".ptr .const ";
break;
case llvm::ADDRESS_SPACE_SHARED:
O << ".ptr .shared ";
break;
case llvm::ADDRESS_SPACE_GLOBAL:
O << ".ptr .global ";
break;
}
O << ".align " << (int) getOpenCLAlignment(TD, ETy) << " ";
}
printParamName(I, paramIndex, O);
continue;
}
// non-pointer scalar to kernel func
O << "\t.param .";
// Special case: predicate operands become .u8 types
if (Ty->isIntegerTy(1))
O << "u8";
else
O << getPTXFundamentalTypeStr(Ty);
O << " ";
printParamName(I, paramIndex, O);
continue;
}
// Non-kernel function, just print .param .b<size> for ABI
// and .reg .b<size> for non-ABI
unsigned sz = 0;
if (isa<IntegerType>(Ty)) {
sz = cast<IntegerType>(Ty)->getBitWidth();
if (sz < 32)
sz = 32;
} else if (isa<PointerType>(Ty))
sz = thePointerTy.getSizeInBits();
else
sz = Ty->getPrimitiveSizeInBits();
if (isABI)
O << "\t.param .b" << sz << " ";
else
O << "\t.reg .b" << sz << " ";
printParamName(I, paramIndex, O);
continue;
}
// Param has the byval attribute, so it must be a pointer type
const PointerType *PTy = dyn_cast<PointerType>(Ty);
assert(PTy && "Param with byval attribute should be a pointer type");
Type *ETy = PTy->getElementType();
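// For example (names illustrative), a byval struct of 24 bytes with 8-byte
// ABI alignment is declared as:
//   .param .align 8 .b8 foo_param_0[24]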
if (isABI || isKernelFunc) {
// Just print .param .align <a> .b8 .param[size];
// <a> = PAL.getparamalignment
// size = typeallocsize of element type
unsigned align = PAL.getParamAlignment(paramIndex + 1);
if (align == 0)
align = TD->getABITypeAlignment(ETy);
unsigned sz = TD->getTypeAllocSize(ETy);
O << "\t.param .align " << align << " .b8 ";
printParamName(I, paramIndex, O);
O << "[" << sz << "]";
continue;
} else {
// Split the ETy into constituent parts and
// print .param .b<size> <name> for each part.
// Further, if a part is vector, print the above for
// each vector element.
SmallVector<EVT, 16> vtparts;
ComputeValueVTs(*TLI, ETy, vtparts);
for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
unsigned elems = 1;
EVT elemtype = vtparts[i];
if (vtparts[i].isVector()) {
elems = vtparts[i].getVectorNumElements();
elemtype = vtparts[i].getVectorElementType();
}
for (unsigned j = 0, je = elems; j != je; ++j) {
unsigned sz = elemtype.getSizeInBits();
if (elemtype.isInteger() && (sz < 32))
sz = 32;
O << "\t.reg .b" << sz << " ";
printParamName(I, paramIndex, O);
if (j < je - 1)
O << ",\n";
++paramIndex;
}
if (i < e - 1)
O << ",\n";
}
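// Every part printed above advanced paramIndex; back off one so the
// outer loop's paramIndex++ leaves it at the next unused index.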
--paramIndex;
continue;
}
}
O << "\n)\n";
}
void NVPTXAsmPrinter::emitFunctionParamList(const MachineFunction &MF,
raw_ostream &O) {
const Function *F = MF.getFunction();
emitFunctionParamList(F, O);
}
void NVPTXAsmPrinter::setAndEmitFunctionVirtualRegisters(
const MachineFunction &MF) {
SmallString<128> Str;
raw_svector_ostream O(Str);
// Map the global virtual register number to a register-class-specific
// virtual register number, starting from 1 within each class.
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
//unsigned numRegClasses = TRI->getNumRegClasses();
// Emit the Fake Stack Object
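// As an illustration, a function (number 0) using 16 bytes of stack with
// 8-byte max alignment on a 64-bit target gets:
//   .local .align 8 .b8 __local_depot0[16];
//   .reg .b64 %SP;
//   .reg .b64 %SPL;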
const MachineFrameInfo *MFI = MF.getFrameInfo();
int NumBytes = (int) MFI->getStackSize();
if (NumBytes) {
O << "\t.local .align " << MFI->getMaxAlignment() << " .b8 \t" << DEPOTNAME
<< getFunctionNumber() << "[" << NumBytes << "];\n";
if (static_cast<const NVPTXTargetMachine &>(MF.getTarget()).is64Bit()) {
O << "\t.reg .b64 \t%SP;\n";
O << "\t.reg .b64 \t%SPL;\n";
} else {
O << "\t.reg .b32 \t%SP;\n";
O << "\t.reg .b32 \t%SPL;\n";
}
}
// Go through all virtual registers to establish the mapping between the
// global virtual register number and the per-class virtual register number.
// We use the per class virtual register number in the ptx output.
unsigned int numVRs = MRI->getNumVirtRegs();
for (unsigned i = 0; i < numVRs; i++) {
unsigned int vr = TRI->index2VirtReg(i);
const TargetRegisterClass *RC = MRI->getRegClass(vr);
DenseMap<unsigned, unsigned> &regmap = VRegMapping[RC];
int n = regmap.size();
regmap.insert(std::make_pair(vr, n + 1));
}
// Emit register declarations
// @TODO: Extract out the real register usage
// O << "\t.reg .pred %p<" << NVPTXNumRegisters << ">;\n";
// O << "\t.reg .s16 %rc<" << NVPTXNumRegisters << ">;\n";
// O << "\t.reg .s16 %rs<" << NVPTXNumRegisters << ">;\n";
// O << "\t.reg .s32 %r<" << NVPTXNumRegisters << ">;\n";
// O << "\t.reg .s64 %rd<" << NVPTXNumRegisters << ">;\n";
// O << "\t.reg .f32 %f<" << NVPTXNumRegisters << ">;\n";
// O << "\t.reg .f64 %fd<" << NVPTXNumRegisters << ">;\n";
// Emit declaration of the virtual registers or 'physical' registers for
// each register class
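// For example, if four Int32Regs virtual registers were mapped above,
// this emits:
//   .reg .b32 %r<5>;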
for (unsigned i=0; i< TRI->getNumRegClasses(); i++) {
const TargetRegisterClass *RC = TRI->getRegClass(i);
DenseMap<unsigned, unsigned> &regmap = VRegMapping[RC];
std::string rcname = getNVPTXRegClassName(RC);
std::string rcStr = getNVPTXRegClassStr(RC);
int n = regmap.size();
// Only declare those registers that may be used.
if (n) {
O << "\t.reg " << rcname << " \t" << rcStr << "<" << (n+1)
<< ">;\n";
}
}
OutStreamer.EmitRawText(O.str());
}
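// printFPConstant - Print \p Fp in PTX hex form: 1.0f prints as
// 0f3F800000, and 1.0 (double) as 0d3FF0000000000000.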
void NVPTXAsmPrinter::printFPConstant(const ConstantFP *Fp, raw_ostream &O) {
APFloat APF = APFloat(Fp->getValueAPF()); // make a copy
bool ignored;
unsigned int numHex;
const char *lead;
if (Fp->getType()->getTypeID() == Type::FloatTyID) {
numHex = 8;
lead = "0f";
APF.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &ignored);
} else if (Fp->getType()->getTypeID() == Type::DoubleTyID) {
numHex = 16;
lead = "0d";
APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &ignored);
} else
llvm_unreachable("unsupported fp type");
APInt API = APF.bitcastToAPInt();
std::string hexstr(utohexstr(API.getZExtValue()));
O << lead;
if (hexstr.length() < numHex)
O << std::string(numHex - hexstr.length(), '0');
O << hexstr;
}
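// printScalarConstant - Print \p CPV as a PTX initializer token.  Under
// generic addressing (EmitGeneric), the address of a non-function symbol
// whose type is in the default address space is printed as generic(name).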
void NVPTXAsmPrinter::printScalarConstant(const Constant *CPV, raw_ostream &O) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(CPV)) {
O << CI->getValue();
return;
}
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CPV)) {
printFPConstant(CFP, O);
return;
}
if (isa<ConstantPointerNull>(CPV)) {
O << "0";
return;
}
if (const GlobalValue *GVar = dyn_cast<GlobalValue>(CPV)) {
PointerType *PTy = dyn_cast<PointerType>(GVar->getType());
bool IsNonGenericPointer = false;
if (PTy && PTy->getAddressSpace() != 0) {
IsNonGenericPointer = true;
}
if (EmitGeneric && !isa<Function>(CPV) && !IsNonGenericPointer) {
O << "generic(";
O << *getSymbol(GVar);
O << ")";
} else {
O << *getSymbol(GVar);
}
return;
}
if (const ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(CPV)) {
const Value *v = Cexpr->stripPointerCasts();
PointerType *PTy = dyn_cast<PointerType>(Cexpr->getType());
bool IsNonGenericPointer = false;
if (PTy && PTy->getAddressSpace() != 0) {
IsNonGenericPointer = true;
}
if (const GlobalValue *GVar = dyn_cast<GlobalValue>(v)) {
if (EmitGeneric && !isa<Function>(v) && !IsNonGenericPointer) {
O << "generic(";
O << *getSymbol(GVar);
O << ")";
} else {
O << *getSymbol(GVar);
}
return;
} else {
O << *lowerConstant(CPV);
return;
}
}
llvm_unreachable("Not scalar type found in printScalarConstant()");
}
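// bufferLEByte - Append the little-endian byte image of the scalar
// constant \p CPV to \p aggBuffer, zero-padding up to \p Bytes when the
// constant is narrower than the space it must fill.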
void NVPTXAsmPrinter::bufferLEByte(const Constant *CPV, int Bytes,
AggBuffer *aggBuffer) {
const DataLayout *TD = TM.getDataLayout();
if (isa<UndefValue>(CPV) || CPV->isNullValue()) {
int s = TD->getTypeAllocSize(CPV->getType());
if (s < Bytes)
s = Bytes;
aggBuffer->addZeros(s);
return;
}
unsigned char *ptr;
switch (CPV->getType()->getTypeID()) {
case Type::IntegerTyID: {
const Type *ETy = CPV->getType();
if (ETy == Type::getInt8Ty(CPV->getContext())) {
unsigned char c =
(unsigned char)cast<ConstantInt>(CPV)->getZExtValue();
ptr = &c;
aggBuffer->addBytes(ptr, 1, Bytes);
} else if (ETy == Type::getInt16Ty(CPV->getContext())) {
short int16 = (short)cast<ConstantInt>(CPV)->getZExtValue();
ptr = (unsigned char *)&int16;
aggBuffer->addBytes(ptr, 2, Bytes);
} else if (ETy == Type::getInt32Ty(CPV->getContext())) {
if (const ConstantInt *constInt = dyn_cast<ConstantInt>(CPV)) {
int int32 = (int)(constInt->getZExtValue());
ptr = (unsigned char *)&int32;
aggBuffer->addBytes(ptr, 4, Bytes);
break;
} else if (const ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(CPV)) {
if (const ConstantInt *constInt = dyn_cast<ConstantInt>(
ConstantFoldConstantExpression(Cexpr, *TD))) {
int int32 = (int)(constInt->getZExtValue());
ptr = (unsigned char *)&int32;
aggBuffer->addBytes(ptr, 4, Bytes);
break;
}
if (Cexpr->getOpcode() == Instruction::PtrToInt) {
Value *v = Cexpr->getOperand(0)->stripPointerCasts();
aggBuffer->addSymbol(v);
aggBuffer->addZeros(4);
break;
}
}
llvm_unreachable("unsupported integer const type");
} else if (ETy == Type::getInt64Ty(CPV->getContext())) {
if (const ConstantInt *constInt = dyn_cast<ConstantInt>(CPV)) {
long long int64 = (long long)(constInt->getZExtValue());
ptr = (unsigned char *)&int64;
aggBuffer->addBytes(ptr, 8, Bytes);
break;
} else if (const ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(CPV)) {
if (const ConstantInt *constInt = dyn_cast<ConstantInt>(
ConstantFoldConstantExpression(Cexpr, *TD))) {
long long int64 = (long long)(constInt->getZExtValue());
ptr = (unsigned char *)&int64;
aggBuffer->addBytes(ptr, 8, Bytes);
break;
}
if (Cexpr->getOpcode() == Instruction::PtrToInt) {
Value *v = Cexpr->getOperand(0)->stripPointerCasts();
aggBuffer->addSymbol(v);
aggBuffer->addZeros(8);
break;
}
}
llvm_unreachable("unsupported integer const type");
} else
llvm_unreachable("unsupported integer const type");
break;
}
case Type::FloatTyID:
case Type::DoubleTyID: {
const ConstantFP *CFP = cast<ConstantFP>(CPV);
const Type *Ty = CFP->getType();
if (Ty == Type::getFloatTy(CPV->getContext())) {
float float32 = (float) CFP->getValueAPF().convertToFloat();
ptr = (unsigned char *)&float32;
aggBuffer->addBytes(ptr, 4, Bytes);
} else if (Ty == Type::getDoubleTy(CPV->getContext())) {
double float64 = CFP->getValueAPF().convertToDouble();
ptr = (unsigned char *)&float64;
aggBuffer->addBytes(ptr, 8, Bytes);
} else {
llvm_unreachable("unsupported fp const type");
}
break;
}
case Type::PointerTyID: {
if (const GlobalValue *GVar = dyn_cast<GlobalValue>(CPV)) {
aggBuffer->addSymbol(GVar);
} else if (const ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(CPV)) {
const Value *v = Cexpr->stripPointerCasts();
aggBuffer->addSymbol(v);
}
unsigned int s = TD->getTypeAllocSize(CPV->getType());
aggBuffer->addZeros(s);
break;
}
case Type::ArrayTyID:
case Type::VectorTyID:
case Type::StructTyID: {
if (isa<ConstantArray>(CPV) || isa<ConstantVector>(CPV) ||
isa<ConstantStruct>(CPV) || isa<ConstantDataSequential>(CPV)) {
int ElementSize = TD->getTypeAllocSize(CPV->getType());
bufferAggregateConstant(CPV, aggBuffer);
if (Bytes > ElementSize)
aggBuffer->addZeros(Bytes - ElementSize);
} else if (isa<ConstantAggregateZero>(CPV))
aggBuffer->addZeros(Bytes);
else
llvm_unreachable("Unexpected Constant type");
break;
}
default:
llvm_unreachable("unsupported type");
}
}
void NVPTXAsmPrinter::bufferAggregateConstant(const Constant *CPV,
AggBuffer *aggBuffer) {
const DataLayout *TD = TM.getDataLayout();
int Bytes;
// Old constants
if (isa<ConstantArray>(CPV) || isa<ConstantVector>(CPV)) {
if (CPV->getNumOperands())
for (unsigned i = 0, e = CPV->getNumOperands(); i != e; ++i)
bufferLEByte(cast<Constant>(CPV->getOperand(i)), 0, aggBuffer);
return;
}
if (const ConstantDataSequential *CDS =
dyn_cast<ConstantDataSequential>(CPV)) {
if (CDS->getNumElements())
for (unsigned i = 0; i < CDS->getNumElements(); ++i)
bufferLEByte(cast<Constant>(CDS->getElementAsConstant(i)), 0,
aggBuffer);
return;
}
if (isa<ConstantStruct>(CPV)) {
if (CPV->getNumOperands()) {
StructType *ST = cast<StructType>(CPV->getType());
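// Each field must cover the distance to the next field's offset; the
// last field also absorbs the struct's tail padding, so exactly
// getTypeAllocSize(ST) bytes are buffered in total.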
for (unsigned i = 0, e = CPV->getNumOperands(); i != e; ++i) {
if (i == (e - 1))
Bytes = TD->getStructLayout(ST)->getElementOffset(0) +
TD->getTypeAllocSize(ST) -
TD->getStructLayout(ST)->getElementOffset(i);
else
Bytes = TD->getStructLayout(ST)->getElementOffset(i + 1) -
TD->getStructLayout(ST)->getElementOffset(i);
bufferLEByte(cast<Constant>(CPV->getOperand(i)), Bytes, aggBuffer);
}
}
return;
}
llvm_unreachable("unsupported constant type in printAggregateConstant()");
}
// isImageType - Return true if \p Ty was recorded in TypeNameMap as one of
// the OpenCL image types (struct._image1d_t/_image2d_t/_image3d_t).
//
bool NVPTXAsmPrinter::isImageType(const Type *Ty) {
std::map<const Type *, std::string>::iterator PI = TypeNameMap.find(Ty);
if (PI != TypeNameMap.end() && (!PI->second.compare("struct._image1d_t") ||
!PI->second.compare("struct._image2d_t") ||
!PI->second.compare("struct._image3d_t")))
return true;
return false;
}
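// ignoreLoc - Suppress source-location output for the pseudo instructions
// that implement call/param plumbing; they carry no meaningful source line
// of their own.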
bool NVPTXAsmPrinter::ignoreLoc(const MachineInstr &MI) {
switch (MI.getOpcode()) {
default:
return false;
case NVPTX::CallArgBeginInst:
case NVPTX::CallArgEndInst0:
case NVPTX::CallArgEndInst1:
case NVPTX::CallArgF32:
case NVPTX::CallArgF64:
case NVPTX::CallArgI16:
case NVPTX::CallArgI32:
case NVPTX::CallArgI32imm:
case NVPTX::CallArgI64:
case NVPTX::CallArgParam:
case NVPTX::CallVoidInst:
case NVPTX::CallVoidInstReg:
case NVPTX::Callseq_End:
case NVPTX::CallVoidInstReg64:
case NVPTX::DeclareParamInst:
case NVPTX::DeclareRetMemInst:
case NVPTX::DeclareRetRegInst:
case NVPTX::DeclareRetScalarInst:
case NVPTX::DeclareScalarParamInst:
case NVPTX::DeclareScalarRegInst:
case NVPTX::StoreParamF32:
case NVPTX::StoreParamF64:
case NVPTX::StoreParamI16:
case NVPTX::StoreParamI32:
case NVPTX::StoreParamI64:
case NVPTX::StoreParamI8:
case NVPTX::StoreRetvalF32:
case NVPTX::StoreRetvalF64:
case NVPTX::StoreRetvalI16:
case NVPTX::StoreRetvalI32:
case NVPTX::StoreRetvalI64:
case NVPTX::StoreRetvalI8:
case NVPTX::LastCallArgF32:
case NVPTX::LastCallArgF64:
case NVPTX::LastCallArgI16:
case NVPTX::LastCallArgI32:
case NVPTX::LastCallArgI32imm:
case NVPTX::LastCallArgI64:
case NVPTX::LastCallArgParam:
case NVPTX::LoadParamMemF32:
case NVPTX::LoadParamMemF64:
case NVPTX::LoadParamMemI16:
case NVPTX::LoadParamMemI32:
case NVPTX::LoadParamMemI64:
case NVPTX::LoadParamMemI8:
case NVPTX::PrototypeInst:
case NVPTX::DBG_VALUE:
return true;
}
return false;
}
/// PrintAsmOperand - Print out an operand for an inline asm expression.
///
bool NVPTXAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
unsigned AsmVariant,
const char *ExtraCode, raw_ostream &O) {
if (ExtraCode && ExtraCode[0]) {
if (ExtraCode[1] != 0)
return true; // Unknown modifier.
switch (ExtraCode[0]) {
default:
// See if this is a generic print operand
return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
case 'r':
break;
}
}
printOperand(MI, OpNo, O);
return false;
}
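/// PrintAsmMemoryOperand - Print a memory operand for an inline asm
/// expression, wrapped in PTX's [address] syntax.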
bool NVPTXAsmPrinter::PrintAsmMemoryOperand(
const MachineInstr *MI, unsigned OpNo, unsigned AsmVariant,
const char *ExtraCode, raw_ostream &O) {
if (ExtraCode && ExtraCode[0])
return true; // Unknown modifier
O << '[';
printMemOperand(MI, OpNo, O);
O << ']';
return false;
}
void NVPTXAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
raw_ostream &O, const char *Modifier) {
const MachineOperand &MO = MI->getOperand(opNum);
switch (MO.getType()) {
case MachineOperand::MO_Register:
if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
if (MO.getReg() == NVPTX::VRDepot)
O << DEPOTNAME << getFunctionNumber();
else
O << NVPTXInstPrinter::getRegisterName(MO.getReg());
} else {
emitVirtualRegister(MO.getReg(), O);
}
return;
case MachineOperand::MO_Immediate:
if (!Modifier)
O << MO.getImm();
else if (strstr(Modifier, "vec") == Modifier)
printVecModifiedImmediate(MO, Modifier, O);
else
llvm_unreachable(
"Don't know how to handle modifier on immediate operand");
return;
case MachineOperand::MO_FPImmediate:
printFPConstant(MO.getFPImm(), O);
break;
case MachineOperand::MO_GlobalAddress:
O << *getSymbol(MO.getGlobal());
break;
case MachineOperand::MO_MachineBasicBlock:
O << *MO.getMBB()->getSymbol();
return;
default:
llvm_unreachable("Operand type not supported.");
}
}
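// printMemOperand - Print a memory reference as base+offset, e.g. %SP+16;
// a zero immediate offset is omitted.  Under the "add" modifier the two
// operands are printed as "base, offset" instead.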
void NVPTXAsmPrinter::printMemOperand(const MachineInstr *MI, int opNum,
raw_ostream &O, const char *Modifier) {
printOperand(MI, opNum, O);
if (Modifier && !strcmp(Modifier, "add")) {
O << ", ";
printOperand(MI, opNum + 1, O);
} else {
if (MI->getOperand(opNum + 1).isImm() &&
MI->getOperand(opNum + 1).getImm() == 0)
return; // don't print ',0' or '+0'
O << "+";
printOperand(MI, opNum + 1, O);
}
}
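// emitSrcInText - Interleave the original source line into the PTX output
// as a comment, e.g. (file name illustrative):
//   //kernel.cu:42 <contents of line 42>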
void NVPTXAsmPrinter::emitSrcInText(StringRef filename, unsigned line) {
std::stringstream temp;
LineReader *reader = this->getReader(filename);
temp << "\n//";
temp << filename.str();
temp << ":";
temp << line;
temp << " ";
temp << reader->readLine(line);
temp << "\n";
this->OutStreamer.EmitRawText(temp.str());
}
LineReader *NVPTXAsmPrinter::getReader(std::string filename) {
if (!reader) {
reader = new LineReader(filename);
}
if (reader->fileName() != filename) {
delete reader;
reader = new LineReader(filename);
}
return reader;
}
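// readLine - Return line \p lineNum (1-based) of the file, rewinding to
// the start whenever an earlier line than the current position is
// requested.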
std::string LineReader::readLine(unsigned lineNum) {
if (lineNum < theCurLine) {
theCurLine = 0;
fstr.seekg(0, std::ios::beg);
}
while (theCurLine < lineNum) {
fstr.getline(buff, 500);
theCurLine++;
}
return buff;
}
// Force static initialization.
extern "C" void LLVMInitializeNVPTXAsmPrinter() {
RegisterAsmPrinter<NVPTXAsmPrinter> X(TheNVPTXTarget32);
RegisterAsmPrinter<NVPTXAsmPrinter> Y(TheNVPTXTarget64);
}