[SPIRV] add SPIRVPrepareFunctions pass and update other passes
The patch adds the SPIRVPrepareFunctions pass, which modifies function signatures containing aggregate arguments and/or return values before IR translation. Information about the original signatures is stored in metadata and is used during call lowering to restore the correct SPIR-V types of function arguments and return values. The pass also substitutes some LLVM intrinsic calls with calls to functions it generates in the module, as the SPIR-V Translator does.

The patch also includes changes in other modules, fixing errors and enabling many SPIR-V features that were omitted earlier. Fifteen LIT tests are added to demonstrate the new functionality.

Differential Revision: https://reviews.llvm.org/D129730

Co-authored-by: Aleksandr Bezzubikov <zuban32s@gmail.com>
Co-authored-by: Michal Paszkowski <michal.paszkowski@outlook.com>
Co-authored-by: Andrey Tretyakov <andrey1.tretyakov@intel.com>
Co-authored-by: Konrad Trifunovic <konrad.trifunovic@intel.com>
Parent: 0ccb6da725
Commit: b8e1544b9d
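As a minimal illustration of the metadata contract described above (the function and struct names here are invented; only the !spv.cloned_funcs name and the (index, original type) encoding, with -1 denoting the return value, are inferred from getOriginalFunctionType() in the SPIRVCallLowering.cpp hunks below), a function whose aggregate return value has been rewritten to i32 might look like:

  %struct.Pair = type { i32, i32 }

  ; Signature after SPIRVPrepareFunctions: the aggregate return type became i32.
  define i32 @bar(i32 %a) {
  entry:
    ret i32 %a
  }

  ; One node per mutated function: the original name, then an (index, original type)
  ; pair where the type is carried by a constant operand; -1 refers to the return value.
  !spv.cloned_funcs = !{!0}
  !0 = !{!"bar", !1}
  !1 = !{i32 -1, %struct.Pair undef}
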
@@ -20,7 +20,7 @@ let TargetPrefix = "spv" in {
def int_spv_gep : Intrinsic<[llvm_anyptr_ty], [llvm_i1_ty, llvm_any_ty, llvm_vararg_ty], [ImmArg<ArgIndex<0>>]>;
def int_spv_load : Intrinsic<[llvm_i32_ty], [llvm_anyptr_ty, llvm_i16_ty, llvm_i8_ty], [ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
def int_spv_store : Intrinsic<[], [llvm_i32_ty, llvm_anyptr_ty, llvm_i16_ty, llvm_i8_ty], [ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
def int_spv_store : Intrinsic<[], [llvm_any_ty, llvm_anyptr_ty, llvm_i16_ty, llvm_i8_ty], [ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
def int_spv_extractv : Intrinsic<[llvm_any_ty], [llvm_i32_ty, llvm_vararg_ty]>;
def int_spv_insertv : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_any_ty, llvm_vararg_ty]>;
def int_spv_extractelt : Intrinsic<[llvm_any_ty], [llvm_any_ty, llvm_anyint_ty]>;

@@ -28,4 +28,5 @@ let TargetPrefix = "spv" in {
def int_spv_const_composite : Intrinsic<[llvm_i32_ty], [llvm_vararg_ty]>;
def int_spv_bitcast : Intrinsic<[llvm_any_ty], [llvm_any_ty]>;
def int_spv_switch : Intrinsic<[], [llvm_any_ty, llvm_vararg_ty]>;
def int_spv_cmpxchg : Intrinsic<[llvm_i32_ty], [llvm_any_ty, llvm_vararg_ty]>;
}

@@ -25,6 +25,7 @@ add_llvm_target(SPIRVCodeGen
SPIRVMCInstLower.cpp
SPIRVModuleAnalysis.cpp
SPIRVPreLegalizer.cpp
SPIRVPrepareFunctions.cpp
SPIRVRegisterBankInfo.cpp
SPIRVRegisterInfo.cpp
SPIRVSubtarget.cpp

@@ -43,6 +44,7 @@ add_llvm_target(SPIRVCodeGen
SelectionDAG
Support
Target
TransformUtils

ADD_TO_COMPONENT
SPIRV

@@ -1068,5 +1068,15 @@ StringRef getKernelProfilingInfoName(KernelProfilingInfo e) {
}
llvm_unreachable("Unexpected operand");
}

std::string getExtInstSetName(InstructionSet e) {
switch (e) {
CASE(InstructionSet, OpenCL_std)
CASE(InstructionSet, GLSL_std_450)
CASE(InstructionSet, SPV_AMD_shader_trinary_minmax)
break;
}
llvm_unreachable("Unexpected operand");
}
} // namespace SPIRV
} // namespace llvm

@@ -706,6 +706,19 @@ enum class KernelProfilingInfo : uint32_t {
CmdExecTime = 0x1,
};
StringRef getKernelProfilingInfoName(KernelProfilingInfo e);

enum class InstructionSet : uint32_t {
OpenCL_std = 0,
GLSL_std_450 = 1,
SPV_AMD_shader_trinary_minmax = 2,
};
std::string getExtInstSetName(InstructionSet e);

// TODO: implement other mnemonics.
enum class Opcode : uint32_t {
InBoundsPtrAccessChain = 70,
PtrCastToGeneric = 121,
};
} // namespace SPIRV
} // namespace llvm

@@ -59,7 +59,7 @@ void SPIRVInstPrinter::printOpConstantVarOps(const MCInst *MI,
}

void SPIRVInstPrinter::recordOpExtInstImport(const MCInst *MI) {
llvm_unreachable("Unimplemented recordOpExtInstImport");
// TODO: insert {Reg, Set} into ExtInstSetIDs map.
}

void SPIRVInstPrinter::printInst(const MCInst *MI, uint64_t Address,

@@ -176,7 +176,18 @@ void SPIRVInstPrinter::printInst(const MCInst *MI, uint64_t Address,
}

void SPIRVInstPrinter::printOpExtInst(const MCInst *MI, raw_ostream &O) {
llvm_unreachable("Unimplemented printOpExtInst");
// The fixed operands have already been printed, so just need to decide what
// type of ExtInst operands to print based on the instruction set and number.
MCInstrDesc MCDesc = MII.get(MI->getOpcode());
unsigned NumFixedOps = MCDesc.getNumOperands();
const auto NumOps = MI->getNumOperands();
if (NumOps == NumFixedOps)
return;

O << ' ';

// TODO: implement special printing for OpenCLExtInst::vstor*.
printRemainingVariableOps(MI, NumFixedOps, O, true);
}

void SPIRVInstPrinter::printOpDecorate(const MCInst *MI, raw_ostream &O) {

@@ -19,6 +19,7 @@ class SPIRVSubtarget;
class InstructionSelector;
class RegisterBankInfo;

ModulePass *createSPIRVPrepareFunctionsPass();
FunctionPass *createSPIRVPreLegalizerPass();
FunctionPass *createSPIRVEmitIntrinsicsPass(SPIRVTargetMachine *TM);
InstructionSelector *

@ -21,6 +21,7 @@
|
|||
#include "SPIRVUtils.h"
|
||||
#include "TargetInfo/SPIRVTargetInfo.h"
|
||||
#include "llvm/ADT/DenseMap.h"
|
||||
#include "llvm/Analysis/ValueTracking.h"
|
||||
#include "llvm/CodeGen/AsmPrinter.h"
|
||||
#include "llvm/CodeGen/MachineConstantPool.h"
|
||||
#include "llvm/CodeGen/MachineFunctionPass.h"
|
||||
|
@ -58,9 +59,14 @@ public:
|
|||
void outputModuleSection(SPIRV::ModuleSectionType MSType);
|
||||
void outputEntryPoints();
|
||||
void outputDebugSourceAndStrings(const Module &M);
|
||||
void outputOpExtInstImports(const Module &M);
|
||||
void outputOpMemoryModel();
|
||||
void outputOpFunctionEnd();
|
||||
void outputExtFuncDecls();
|
||||
void outputExecutionModeFromMDNode(Register Reg, MDNode *Node,
|
||||
SPIRV::ExecutionMode EM);
|
||||
void outputExecutionMode(const Module &M);
|
||||
void outputAnnotations(const Module &M);
|
||||
void outputModuleSections();
|
||||
|
||||
void emitInstruction(const MachineInstr *MI) override;
|
||||
|
@ -127,6 +133,8 @@ void SPIRVAsmPrinter::emitFunctionBodyEnd() {
|
|||
}
|
||||
|
||||
void SPIRVAsmPrinter::emitOpLabel(const MachineBasicBlock &MBB) {
|
||||
if (MAI->MBBsToSkip.contains(&MBB))
|
||||
return;
|
||||
MCInst LabelInst;
|
||||
LabelInst.setOpcode(SPIRV::OpLabel);
|
||||
LabelInst.addOperand(MCOperand::createReg(MAI->getOrCreateMBBRegister(MBB)));
|
||||
|
@ -237,6 +245,13 @@ void SPIRVAsmPrinter::outputModuleSection(SPIRV::ModuleSectionType MSType) {
|
|||
}
|
||||
|
||||
void SPIRVAsmPrinter::outputDebugSourceAndStrings(const Module &M) {
|
||||
// Output OpSourceExtensions.
|
||||
for (auto &Str : MAI->SrcExt) {
|
||||
MCInst Inst;
|
||||
Inst.setOpcode(SPIRV::OpSourceExtension);
|
||||
addStringImm(Str.first(), Inst);
|
||||
outputMCInst(Inst);
|
||||
}
|
||||
// Output OpSource.
|
||||
MCInst Inst;
|
||||
Inst.setOpcode(SPIRV::OpSource);
|
||||
|
@ -246,6 +261,19 @@ void SPIRVAsmPrinter::outputDebugSourceAndStrings(const Module &M) {
|
|||
outputMCInst(Inst);
|
||||
}
|
||||
|
||||
void SPIRVAsmPrinter::outputOpExtInstImports(const Module &M) {
|
||||
for (auto &CU : MAI->ExtInstSetMap) {
|
||||
unsigned Set = CU.first;
|
||||
Register Reg = CU.second;
|
||||
MCInst Inst;
|
||||
Inst.setOpcode(SPIRV::OpExtInstImport);
|
||||
Inst.addOperand(MCOperand::createReg(Reg));
|
||||
addStringImm(getExtInstSetName(static_cast<SPIRV::InstructionSet>(Set)),
|
||||
Inst);
|
||||
outputMCInst(Inst);
|
||||
}
|
||||
}
|
||||
|
||||
void SPIRVAsmPrinter::outputOpMemoryModel() {
|
||||
MCInst Inst;
|
||||
Inst.setOpcode(SPIRV::OpMemoryModel);
|
||||
|
@ -301,6 +329,135 @@ void SPIRVAsmPrinter::outputExtFuncDecls() {
|
|||
}
|
||||
}
|
||||
|
||||
// Encode LLVM type by SPIR-V execution mode VecTypeHint.
|
||||
static unsigned encodeVecTypeHint(Type *Ty) {
|
||||
if (Ty->isHalfTy())
|
||||
return 4;
|
||||
if (Ty->isFloatTy())
|
||||
return 5;
|
||||
if (Ty->isDoubleTy())
|
||||
return 6;
|
||||
if (IntegerType *IntTy = dyn_cast<IntegerType>(Ty)) {
|
||||
switch (IntTy->getIntegerBitWidth()) {
|
||||
case 8:
|
||||
return 0;
|
||||
case 16:
|
||||
return 1;
|
||||
case 32:
|
||||
return 2;
|
||||
case 64:
|
||||
return 3;
|
||||
default:
|
||||
llvm_unreachable("invalid integer type");
|
||||
}
|
||||
}
|
||||
if (FixedVectorType *VecTy = dyn_cast<FixedVectorType>(Ty)) {
|
||||
Type *EleTy = VecTy->getElementType();
|
||||
unsigned Size = VecTy->getNumElements();
|
||||
return Size << 16 | encodeVecTypeHint(EleTy);
|
||||
}
|
||||
llvm_unreachable("invalid type");
|
||||
}
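// Illustrative note (not part of the original patch): the encoding above mirrors
// the OpenCL vec_type_hint convention -- the low 16 bits select the element type
// (0=i8, 1=i16, 2=i32, 3=i64, 4=half, 5=float, 6=double) and the high 16 bits hold
// the vector width. For example, a <4 x float> hint encodes as (4 << 16) | 5, i.e.
// 0x40005, while a scalar float hint encodes as 5.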
|
||||
|
||||
static void addOpsFromMDNode(MDNode *MDN, MCInst &Inst,
|
||||
SPIRV::ModuleAnalysisInfo *MAI) {
|
||||
for (const MDOperand &MDOp : MDN->operands()) {
|
||||
if (auto *CMeta = dyn_cast<ConstantAsMetadata>(MDOp)) {
|
||||
Constant *C = CMeta->getValue();
|
||||
if (ConstantInt *Const = dyn_cast<ConstantInt>(C)) {
|
||||
Inst.addOperand(MCOperand::createImm(Const->getZExtValue()));
|
||||
} else if (auto *CE = dyn_cast<Function>(C)) {
|
||||
Register FuncReg = MAI->getFuncReg(CE->getName().str());
|
||||
assert(FuncReg.isValid());
|
||||
Inst.addOperand(MCOperand::createReg(FuncReg));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void SPIRVAsmPrinter::outputExecutionModeFromMDNode(Register Reg, MDNode *Node,
|
||||
SPIRV::ExecutionMode EM) {
|
||||
MCInst Inst;
|
||||
Inst.setOpcode(SPIRV::OpExecutionMode);
|
||||
Inst.addOperand(MCOperand::createReg(Reg));
|
||||
Inst.addOperand(MCOperand::createImm(static_cast<unsigned>(EM)));
|
||||
addOpsFromMDNode(Node, Inst, MAI);
|
||||
outputMCInst(Inst);
|
||||
}
|
||||
|
||||
void SPIRVAsmPrinter::outputExecutionMode(const Module &M) {
|
||||
NamedMDNode *Node = M.getNamedMetadata("spirv.ExecutionMode");
|
||||
if (Node) {
|
||||
for (unsigned i = 0; i < Node->getNumOperands(); i++) {
|
||||
MCInst Inst;
|
||||
Inst.setOpcode(SPIRV::OpExecutionMode);
|
||||
addOpsFromMDNode(cast<MDNode>(Node->getOperand(i)), Inst, MAI);
|
||||
outputMCInst(Inst);
|
||||
}
|
||||
}
|
||||
for (auto FI = M.begin(), E = M.end(); FI != E; ++FI) {
|
||||
const Function &F = *FI;
|
||||
if (F.isDeclaration())
|
||||
continue;
|
||||
Register FReg = MAI->getFuncReg(F.getGlobalIdentifier());
|
||||
assert(FReg.isValid());
|
||||
if (MDNode *Node = F.getMetadata("reqd_work_group_size"))
|
||||
outputExecutionModeFromMDNode(FReg, Node,
|
||||
SPIRV::ExecutionMode::LocalSize);
|
||||
if (MDNode *Node = F.getMetadata("work_group_size_hint"))
|
||||
outputExecutionModeFromMDNode(FReg, Node,
|
||||
SPIRV::ExecutionMode::LocalSizeHint);
|
||||
if (MDNode *Node = F.getMetadata("intel_reqd_sub_group_size"))
|
||||
outputExecutionModeFromMDNode(FReg, Node,
|
||||
SPIRV::ExecutionMode::SubgroupSize);
|
||||
if (MDNode *Node = F.getMetadata("vec_type_hint")) {
|
||||
MCInst Inst;
|
||||
Inst.setOpcode(SPIRV::OpExecutionMode);
|
||||
Inst.addOperand(MCOperand::createReg(FReg));
|
||||
unsigned EM = static_cast<unsigned>(SPIRV::ExecutionMode::VecTypeHint);
|
||||
Inst.addOperand(MCOperand::createImm(EM));
|
||||
unsigned TypeCode = encodeVecTypeHint(getMDOperandAsType(Node, 0));
|
||||
Inst.addOperand(MCOperand::createImm(TypeCode));
|
||||
outputMCInst(Inst);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void SPIRVAsmPrinter::outputAnnotations(const Module &M) {
|
||||
outputModuleSection(SPIRV::MB_Annotations);
|
||||
// Process llvm.global.annotations special global variable.
|
||||
for (auto F = M.global_begin(), E = M.global_end(); F != E; ++F) {
|
||||
if ((*F).getName() != "llvm.global.annotations")
|
||||
continue;
|
||||
const GlobalVariable *V = &(*F);
|
||||
const ConstantArray *CA = cast<ConstantArray>(V->getOperand(0));
|
||||
for (Value *Op : CA->operands()) {
|
||||
ConstantStruct *CS = cast<ConstantStruct>(Op);
|
||||
// The first field of the struct contains a pointer to
|
||||
// the annotated variable.
|
||||
Value *AnnotatedVar = CS->getOperand(0)->stripPointerCasts();
|
||||
if (!isa<Function>(AnnotatedVar))
|
||||
llvm_unreachable("Unsupported value in llvm.global.annotations");
|
||||
Function *Func = cast<Function>(AnnotatedVar);
|
||||
Register Reg = MAI->getFuncReg(Func->getGlobalIdentifier());
|
||||
|
||||
// The second field contains a pointer to a global annotation string.
|
||||
GlobalVariable *GV =
|
||||
cast<GlobalVariable>(CS->getOperand(1)->stripPointerCasts());
|
||||
|
||||
StringRef AnnotationString;
|
||||
getConstantStringInfo(GV, AnnotationString);
|
||||
MCInst Inst;
|
||||
Inst.setOpcode(SPIRV::OpDecorate);
|
||||
Inst.addOperand(MCOperand::createReg(Reg));
|
||||
unsigned Dec = static_cast<unsigned>(SPIRV::Decoration::UserSemantic);
|
||||
Inst.addOperand(MCOperand::createImm(Dec));
|
||||
addStringImm(AnnotationString, Inst);
|
||||
outputMCInst(Inst);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void SPIRVAsmPrinter::outputModuleSections() {
|
||||
const Module *M = MMI->getModule();
|
||||
// Get the global subtarget to output module-level info.
|
||||
|
@ -311,13 +468,14 @@ void SPIRVAsmPrinter::outputModuleSections() {
|
|||
// Output instructions according to the Logical Layout of a Module:
|
||||
// TODO: 1,2. All OpCapability instructions, then optional OpExtension
|
||||
// instructions.
|
||||
// TODO: 3. Optional OpExtInstImport instructions.
|
||||
// 3. Optional OpExtInstImport instructions.
|
||||
outputOpExtInstImports(*M);
|
||||
// 4. The single required OpMemoryModel instruction.
|
||||
outputOpMemoryModel();
|
||||
// 5. All entry point declarations, using OpEntryPoint.
|
||||
outputEntryPoints();
|
||||
// 6. Execution-mode declarations, using OpExecutionMode or OpExecutionModeId.
|
||||
// TODO:
|
||||
outputExecutionMode(*M);
|
||||
// 7a. Debug: all OpString, OpSourceExtension, OpSource, and
|
||||
// OpSourceContinued, without forward references.
|
||||
outputDebugSourceAndStrings(*M);
|
||||
|
@ -326,7 +484,7 @@ void SPIRVAsmPrinter::outputModuleSections() {
|
|||
// 7c. Debug: all OpModuleProcessed instructions.
|
||||
outputModuleSection(SPIRV::MB_DebugModuleProcessed);
|
||||
// 8. All annotation instructions (all decorations).
|
||||
outputModuleSection(SPIRV::MB_Annotations);
|
||||
outputAnnotations(*M);
|
||||
// 9. All type declarations (OpTypeXXX instructions), all constant
|
||||
// instructions, and all global variable declarations. This section is
|
||||
// the first section to allow use of: OpLine and OpNoLine debug information;
|
||||
|
|
|
@ -24,9 +24,8 @@
|
|||
using namespace llvm;
|
||||
|
||||
SPIRVCallLowering::SPIRVCallLowering(const SPIRVTargetLowering &TLI,
|
||||
const SPIRVSubtarget &ST,
|
||||
SPIRVGlobalRegistry *GR)
|
||||
: CallLowering(&TLI), ST(ST), GR(GR) {}
|
||||
: CallLowering(&TLI), GR(GR) {}
|
||||
|
||||
bool SPIRVCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
|
||||
const Value *Val, ArrayRef<Register> VRegs,
|
||||
|
@ -36,11 +35,13 @@ bool SPIRVCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
|
|||
// TODO: handle the case of multiple registers.
|
||||
if (VRegs.size() > 1)
|
||||
return false;
|
||||
if (Val)
|
||||
if (Val) {
|
||||
const auto &STI = MIRBuilder.getMF().getSubtarget();
|
||||
return MIRBuilder.buildInstr(SPIRV::OpReturnValue)
|
||||
.addUse(VRegs[0])
|
||||
.constrainAllUses(MIRBuilder.getTII(), *ST.getRegisterInfo(),
|
||||
*ST.getRegBankInfo());
|
||||
.constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
|
||||
*STI.getRegBankInfo());
|
||||
}
|
||||
MIRBuilder.buildInstr(SPIRV::OpReturn);
|
||||
return true;
|
||||
}
|
||||
|
@ -63,6 +64,56 @@ static uint32_t getFunctionControl(const Function &F) {
|
|||
return FuncControl;
|
||||
}
|
||||
|
||||
static ConstantInt *getConstInt(MDNode *MD, unsigned NumOp) {
|
||||
if (MD->getNumOperands() > NumOp) {
|
||||
auto *CMeta = dyn_cast<ConstantAsMetadata>(MD->getOperand(NumOp));
|
||||
if (CMeta)
|
||||
return dyn_cast<ConstantInt>(CMeta->getValue());
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// This code restores function args/retvalue types for composite cases
|
||||
// because the final types should still be aggregate whereas they're i32
|
||||
// during the translation to cope with aggregate flattening etc.
|
||||
static FunctionType *getOriginalFunctionType(const Function &F) {
|
||||
auto *NamedMD = F.getParent()->getNamedMetadata("spv.cloned_funcs");
|
||||
if (NamedMD == nullptr)
|
||||
return F.getFunctionType();
|
||||
|
||||
Type *RetTy = F.getFunctionType()->getReturnType();
|
||||
SmallVector<Type *, 4> ArgTypes;
|
||||
for (auto &Arg : F.args())
|
||||
ArgTypes.push_back(Arg.getType());
|
||||
|
||||
auto ThisFuncMDIt =
|
||||
std::find_if(NamedMD->op_begin(), NamedMD->op_end(), [&F](MDNode *N) {
|
||||
return isa<MDString>(N->getOperand(0)) &&
|
||||
cast<MDString>(N->getOperand(0))->getString() == F.getName();
|
||||
});
|
||||
// TODO: probably one function can have numerous type mutations,
|
||||
// so we should support this.
|
||||
if (ThisFuncMDIt != NamedMD->op_end()) {
|
||||
auto *ThisFuncMD = *ThisFuncMDIt;
|
||||
MDNode *MD = dyn_cast<MDNode>(ThisFuncMD->getOperand(1));
|
||||
assert(MD && "MDNode operand is expected");
|
||||
ConstantInt *Const = getConstInt(MD, 0);
|
||||
if (Const) {
|
||||
auto *CMeta = dyn_cast<ConstantAsMetadata>(MD->getOperand(1));
|
||||
assert(CMeta && "ConstantAsMetadata operand is expected");
|
||||
assert(Const->getSExtValue() >= -1);
|
||||
// Currently -1 indicates return value, greater values mean
|
||||
// argument numbers.
|
||||
if (Const->getSExtValue() == -1)
|
||||
RetTy = CMeta->getType();
|
||||
else
|
||||
ArgTypes[Const->getSExtValue()] = CMeta->getType();
|
||||
}
|
||||
}
|
||||
|
||||
return FunctionType::get(RetTy, ArgTypes, F.isVarArg());
|
||||
}
|
||||
|
||||
bool SPIRVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
|
||||
const Function &F,
|
||||
ArrayRef<ArrayRef<Register>> VRegs,
|
||||
|
@ -71,7 +122,8 @@ bool SPIRVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
|
|||
GR->setCurrentFunc(MIRBuilder.getMF());
|
||||
|
||||
// Assign types and names to all args, and store their types for later.
|
||||
SmallVector<Register, 4> ArgTypeVRegs;
|
||||
FunctionType *FTy = getOriginalFunctionType(F);
|
||||
SmallVector<SPIRVType *, 4> ArgTypeVRegs;
|
||||
if (VRegs.size() > 0) {
|
||||
unsigned i = 0;
|
||||
for (const auto &Arg : F.args()) {
|
||||
|
@ -79,9 +131,18 @@ bool SPIRVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
|
|||
// TODO: handle the case of multiple registers.
|
||||
if (VRegs[i].size() > 1)
|
||||
return false;
|
||||
auto *SpirvTy =
|
||||
GR->assignTypeToVReg(Arg.getType(), VRegs[i][0], MIRBuilder);
|
||||
ArgTypeVRegs.push_back(GR->getSPIRVTypeID(SpirvTy));
|
||||
Type *ArgTy = FTy->getParamType(i);
|
||||
SPIRV::AccessQualifier AQ = SPIRV::AccessQualifier::ReadWrite;
|
||||
MDNode *Node = F.getMetadata("kernel_arg_access_qual");
|
||||
if (Node && i < Node->getNumOperands()) {
|
||||
StringRef AQString = cast<MDString>(Node->getOperand(i))->getString();
|
||||
if (AQString.compare("read_only") == 0)
|
||||
AQ = SPIRV::AccessQualifier::ReadOnly;
|
||||
else if (AQString.compare("write_only") == 0)
|
||||
AQ = SPIRV::AccessQualifier::WriteOnly;
|
||||
}
|
||||
auto *SpirvTy = GR->assignTypeToVReg(ArgTy, VRegs[i][0], MIRBuilder, AQ);
|
||||
ArgTypeVRegs.push_back(SpirvTy);
|
||||
|
||||
if (Arg.hasName())
|
||||
buildOpName(VRegs[i][0], Arg.getName(), MIRBuilder);
|
||||
|
@ -92,8 +153,10 @@ bool SPIRVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
|
|||
SPIRV::Decoration::MaxByteOffset, {DerefBytes});
|
||||
}
|
||||
if (Arg.hasAttribute(Attribute::Alignment)) {
|
||||
auto Alignment = static_cast<unsigned>(
|
||||
Arg.getAttribute(Attribute::Alignment).getValueAsInt());
|
||||
buildOpDecorate(VRegs[i][0], MIRBuilder, SPIRV::Decoration::Alignment,
|
||||
{static_cast<unsigned>(Arg.getParamAlignment())});
|
||||
{Alignment});
|
||||
}
|
||||
if (Arg.hasAttribute(Attribute::ReadOnly)) {
|
||||
auto Attr =
|
||||
|
@ -107,6 +170,38 @@ bool SPIRVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
|
|||
buildOpDecorate(VRegs[i][0], MIRBuilder,
|
||||
SPIRV::Decoration::FuncParamAttr, {Attr});
|
||||
}
|
||||
if (Arg.hasAttribute(Attribute::NoAlias)) {
|
||||
auto Attr =
|
||||
static_cast<unsigned>(SPIRV::FunctionParameterAttribute::NoAlias);
|
||||
buildOpDecorate(VRegs[i][0], MIRBuilder,
|
||||
SPIRV::Decoration::FuncParamAttr, {Attr});
|
||||
}
|
||||
Node = F.getMetadata("kernel_arg_type_qual");
|
||||
if (Node && i < Node->getNumOperands()) {
|
||||
StringRef TypeQual = cast<MDString>(Node->getOperand(i))->getString();
|
||||
if (TypeQual.compare("volatile") == 0)
|
||||
buildOpDecorate(VRegs[i][0], MIRBuilder, SPIRV::Decoration::Volatile,
|
||||
{});
|
||||
}
|
||||
Node = F.getMetadata("spirv.ParameterDecorations");
|
||||
if (Node && i < Node->getNumOperands() &&
|
||||
isa<MDNode>(Node->getOperand(i))) {
|
||||
MDNode *MD = cast<MDNode>(Node->getOperand(i));
|
||||
for (const MDOperand &MDOp : MD->operands()) {
|
||||
MDNode *MD2 = dyn_cast<MDNode>(MDOp);
|
||||
assert(MD2 && "Metadata operand is expected");
|
||||
ConstantInt *Const = getConstInt(MD2, 0);
|
||||
assert(Const && "MDOperand should be ConstantInt");
|
||||
auto Dec = static_cast<SPIRV::Decoration>(Const->getZExtValue());
|
||||
std::vector<uint32_t> DecVec;
|
||||
for (unsigned j = 1; j < MD2->getNumOperands(); j++) {
|
||||
ConstantInt *Const = getConstInt(MD2, j);
|
||||
assert(Const && "MDOperand should be ConstantInt");
|
||||
DecVec.push_back(static_cast<uint32_t>(Const->getZExtValue()));
|
||||
}
|
||||
buildOpDecorate(VRegs[i][0], MIRBuilder, Dec, DecVec);
|
||||
}
|
||||
}
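// Illustrative note (not part of the original patch): as implied by the loop above,
// !spirv.ParameterDecorations is expected to carry one MDNode per formal parameter,
// each containing decoration nodes of the form !{i32 DecorationId, literal operands...};
// e.g. a hypothetical entry !{i32 44, i32 16} would request SPIR-V decoration 44
// (Alignment) with a single literal operand of 16.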
|
||||
++i;
|
||||
}
|
||||
}
|
||||
|
@ -117,30 +212,30 @@ bool SPIRVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
|
|||
MRI->setRegClass(FuncVReg, &SPIRV::IDRegClass);
|
||||
if (F.isDeclaration())
|
||||
GR->add(&F, &MIRBuilder.getMF(), FuncVReg);
|
||||
|
||||
auto *FTy = F.getFunctionType();
|
||||
auto FuncTy = GR->assignTypeToVReg(FTy, FuncVReg, MIRBuilder);
|
||||
SPIRVType *RetTy = GR->getOrCreateSPIRVType(FTy->getReturnType(), MIRBuilder);
|
||||
SPIRVType *FuncTy = GR->getOrCreateOpTypeFunctionWithArgs(
|
||||
FTy, RetTy, ArgTypeVRegs, MIRBuilder);
|
||||
|
||||
// Build the OpTypeFunction declaring it.
|
||||
Register ReturnTypeID = FuncTy->getOperand(1).getReg();
|
||||
uint32_t FuncControl = getFunctionControl(F);
|
||||
|
||||
MIRBuilder.buildInstr(SPIRV::OpFunction)
|
||||
.addDef(FuncVReg)
|
||||
.addUse(ReturnTypeID)
|
||||
.addUse(GR->getSPIRVTypeID(RetTy))
|
||||
.addImm(FuncControl)
|
||||
.addUse(GR->getSPIRVTypeID(FuncTy));
|
||||
|
||||
// Add OpFunctionParameters.
|
||||
const unsigned NumArgs = ArgTypeVRegs.size();
|
||||
for (unsigned i = 0; i < NumArgs; ++i) {
|
||||
int i = 0;
|
||||
for (const auto &Arg : F.args()) {
|
||||
assert(VRegs[i].size() == 1 && "Formal arg has multiple vregs");
|
||||
MRI->setRegClass(VRegs[i][0], &SPIRV::IDRegClass);
|
||||
MIRBuilder.buildInstr(SPIRV::OpFunctionParameter)
|
||||
.addDef(VRegs[i][0])
|
||||
.addUse(ArgTypeVRegs[i]);
|
||||
.addUse(GR->getSPIRVTypeID(ArgTypeVRegs[i]));
|
||||
if (F.isDeclaration())
|
||||
GR->add(F.getArg(i), &MIRBuilder.getMF(), VRegs[i][0]);
|
||||
GR->add(&Arg, &MIRBuilder.getMF(), VRegs[i][0]);
|
||||
i++;
|
||||
}
|
||||
// Name the function.
|
||||
if (F.hasName())
|
||||
|
@ -169,48 +264,51 @@ bool SPIRVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
|
|||
// TODO: handle the case of multiple registers.
|
||||
if (Info.OrigRet.Regs.size() > 1)
|
||||
return false;
|
||||
MachineFunction &MF = MIRBuilder.getMF();
|
||||
GR->setCurrentFunc(MF);
|
||||
FunctionType *FTy = nullptr;
|
||||
const Function *CF = nullptr;
|
||||
|
||||
GR->setCurrentFunc(MIRBuilder.getMF());
|
||||
Register ResVReg =
|
||||
Info.OrigRet.Regs.empty() ? Register(0) : Info.OrigRet.Regs[0];
|
||||
// Emit a regular OpFunctionCall. If it's an externally declared function,
|
||||
// be sure to emit its type and function declaration here. It will be
|
||||
// hoisted globally later.
|
||||
// be sure to emit its type and function declaration here. It will be hoisted
|
||||
// globally later.
|
||||
if (Info.Callee.isGlobal()) {
|
||||
auto *CF = dyn_cast_or_null<const Function>(Info.Callee.getGlobal());
|
||||
CF = dyn_cast_or_null<const Function>(Info.Callee.getGlobal());
|
||||
// TODO: support constexpr casts and indirect calls.
|
||||
if (CF == nullptr)
|
||||
return false;
|
||||
if (CF->isDeclaration()) {
|
||||
FTy = getOriginalFunctionType(*CF);
|
||||
}
|
||||
|
||||
Register ResVReg =
|
||||
Info.OrigRet.Regs.empty() ? Register(0) : Info.OrigRet.Regs[0];
|
||||
if (CF && CF->isDeclaration() &&
|
||||
!GR->find(CF, &MIRBuilder.getMF()).isValid()) {
|
||||
// Emit the type info and forward function declaration to the first MBB
|
||||
// to ensure VReg definition dependencies are valid across all MBBs.
|
||||
MachineBasicBlock::iterator OldII = MIRBuilder.getInsertPt();
|
||||
MachineBasicBlock &OldBB = MIRBuilder.getMBB();
|
||||
MachineBasicBlock &FirstBB = *MIRBuilder.getMF().getBlockNumbered(0);
|
||||
MIRBuilder.setInsertPt(FirstBB, FirstBB.instr_end());
|
||||
MachineIRBuilder FirstBlockBuilder;
|
||||
FirstBlockBuilder.setMF(MF);
|
||||
FirstBlockBuilder.setMBB(*MF.getBlockNumbered(0));
|
||||
|
||||
SmallVector<ArrayRef<Register>, 8> VRegArgs;
|
||||
SmallVector<SmallVector<Register, 1>, 8> ToInsert;
|
||||
for (const Argument &Arg : CF->args()) {
|
||||
if (MIRBuilder.getDataLayout().getTypeStoreSize(Arg.getType()).isZero())
|
||||
continue; // Don't handle zero sized types.
|
||||
ToInsert.push_back({MIRBuilder.getMRI()->createGenericVirtualRegister(
|
||||
LLT::scalar(32))});
|
||||
ToInsert.push_back(
|
||||
{MIRBuilder.getMRI()->createGenericVirtualRegister(LLT::scalar(32))});
|
||||
VRegArgs.push_back(ToInsert.back());
|
||||
}
|
||||
// TODO: Reuse FunctionLoweringInfo.
|
||||
// TODO: Reuse FunctionLoweringInfo
|
||||
FunctionLoweringInfo FuncInfo;
|
||||
lowerFormalArguments(MIRBuilder, *CF, VRegArgs, FuncInfo);
|
||||
MIRBuilder.setInsertPt(OldBB, OldII);
|
||||
}
|
||||
lowerFormalArguments(FirstBlockBuilder, *CF, VRegArgs, FuncInfo);
|
||||
}
|
||||
|
||||
// Make sure there's a valid return reg, even for functions returning void.
|
||||
if (!ResVReg.isValid()) {
|
||||
if (!ResVReg.isValid())
|
||||
ResVReg = MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass);
|
||||
}
|
||||
SPIRVType *RetType =
|
||||
GR->assignTypeToVReg(Info.OrigRet.Ty, ResVReg, MIRBuilder);
|
||||
GR->assignTypeToVReg(FTy->getReturnType(), ResVReg, MIRBuilder);
|
||||
|
||||
// Emit the OpFunctionCall and its args.
|
||||
auto MIB = MIRBuilder.buildInstr(SPIRV::OpFunctionCall)
|
||||
|
@ -224,6 +322,7 @@ bool SPIRVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
|
|||
return false;
|
||||
MIB.addUse(Arg.Regs[0]);
|
||||
}
|
||||
return MIB.constrainAllUses(MIRBuilder.getTII(), *ST.getRegisterInfo(),
|
||||
*ST.getRegBankInfo());
|
||||
const auto &STI = MF.getSubtarget();
|
||||
return MIB.constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
|
||||
*STI.getRegBankInfo());
|
||||
}
|
||||
|
|
|
@@ -13,23 +13,21 @@
#ifndef LLVM_LIB_TARGET_SPIRV_SPIRVCALLLOWERING_H
#define LLVM_LIB_TARGET_SPIRV_SPIRVCALLLOWERING_H

#include "SPIRVGlobalRegistry.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"

namespace llvm {

class SPIRVGlobalRegistry;
class SPIRVSubtarget;
class SPIRVTargetLowering;

class SPIRVCallLowering : public CallLowering {
private:
const SPIRVSubtarget &ST;
// Used to create and assign function, argument, and return type information.
SPIRVGlobalRegistry *GR;

public:
SPIRVCallLowering(const SPIRVTargetLowering &TLI, const SPIRVSubtarget &ST,
SPIRVGlobalRegistry *GR);
SPIRVCallLowering(const SPIRVTargetLowering &TLI, SPIRVGlobalRegistry *GR);

// Built OpReturn or OpReturnValue.
bool lowerReturn(MachineIRBuilder &MIRBuiler, const Value *Val,

@@ -169,6 +169,8 @@ public:
Register find(const Argument *Arg, const MachineFunction *MF) {
return AT.find(const_cast<Argument *>(Arg), MF);
}

const SPIRVDuplicatesTracker<Type> *getTypes() { return &TT; }
};
} // namespace llvm
#endif
#endif // LLVM_LIB_TARGET_SPIRV_SPIRVDUPLICATESTRACKER_H

@ -87,6 +87,7 @@ public:
|
|||
Instruction *visitLoadInst(LoadInst &I);
|
||||
Instruction *visitStoreInst(StoreInst &I);
|
||||
Instruction *visitAllocaInst(AllocaInst &I);
|
||||
Instruction *visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
|
||||
bool runOnFunction(Function &F) override;
|
||||
};
|
||||
} // namespace
|
||||
|
@ -103,7 +104,7 @@ static inline bool isAssignTypeInstr(const Instruction *I) {
|
|||
|
||||
static bool isMemInstrToReplace(Instruction *I) {
|
||||
return isa<StoreInst>(I) || isa<LoadInst>(I) || isa<InsertValueInst>(I) ||
|
||||
isa<ExtractValueInst>(I);
|
||||
isa<ExtractValueInst>(I) || isa<AtomicCmpXchgInst>(I);
|
||||
}
|
||||
|
||||
static bool isAggrToReplace(const Value *V) {
|
||||
|
@ -134,13 +135,14 @@ void SPIRVEmitIntrinsics::replaceMemInstrUses(Instruction *Old,
|
|||
Instruction *New) {
|
||||
while (!Old->user_empty()) {
|
||||
auto *U = Old->user_back();
|
||||
if (isMemInstrToReplace(U) || isa<ReturnInst>(U)) {
|
||||
U->replaceUsesOfWith(Old, New);
|
||||
} else if (isAssignTypeInstr(U)) {
|
||||
if (isAssignTypeInstr(U)) {
|
||||
IRB->SetInsertPoint(U);
|
||||
SmallVector<Value *, 2> Args = {New, U->getOperand(1)};
|
||||
IRB->CreateIntrinsic(Intrinsic::spv_assign_type, {New->getType()}, Args);
|
||||
U->eraseFromParent();
|
||||
} else if (isMemInstrToReplace(U) || isa<ReturnInst>(U) ||
|
||||
isa<CallInst>(U)) {
|
||||
U->replaceUsesOfWith(Old, New);
|
||||
} else {
|
||||
llvm_unreachable("illegal aggregate intrinsic user");
|
||||
}
|
||||
|
@ -301,8 +303,8 @@ Instruction *SPIRVEmitIntrinsics::visitStoreInst(StoreInst &I) {
|
|||
MachineMemOperand::Flags Flags =
|
||||
TLI->getStoreMemOperandFlags(I, F->getParent()->getDataLayout());
|
||||
auto *PtrOp = I.getPointerOperand();
|
||||
auto *NewI =
|
||||
IRB->CreateIntrinsic(Intrinsic::spv_store, {PtrOp->getType()},
|
||||
auto *NewI = IRB->CreateIntrinsic(
|
||||
Intrinsic::spv_store, {I.getValueOperand()->getType(), PtrOp->getType()},
|
||||
{I.getValueOperand(), PtrOp, IRB->getInt16(Flags),
|
||||
IRB->getInt8(I.getAlign().value())});
|
||||
I.eraseFromParent();
|
||||
|
@ -314,6 +316,22 @@ Instruction *SPIRVEmitIntrinsics::visitAllocaInst(AllocaInst &I) {
|
|||
return &I;
|
||||
}
|
||||
|
||||
Instruction *SPIRVEmitIntrinsics::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
|
||||
assert(I.getType()->isAggregateType() && "Aggregate result is expected");
|
||||
SmallVector<Value *> Args;
|
||||
for (auto &Op : I.operands())
|
||||
Args.push_back(Op);
|
||||
Args.push_back(IRB->getInt32(I.getSyncScopeID()));
|
||||
Args.push_back(IRB->getInt32(
|
||||
static_cast<uint32_t>(getMemSemantics(I.getSuccessOrdering()))));
|
||||
Args.push_back(IRB->getInt32(
|
||||
static_cast<uint32_t>(getMemSemantics(I.getFailureOrdering()))));
|
||||
auto *NewI = IRB->CreateIntrinsic(Intrinsic::spv_cmpxchg,
|
||||
{I.getPointerOperand()->getType()}, {Args});
|
||||
replaceMemInstrUses(&I, NewI);
|
||||
return NewI;
|
||||
}
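// Illustrative note (not part of the original patch): for a cmpxchg on an i32
// pointer this emits a call along the lines of (intrinsic name mangling assumed)
//   %r = call i32 @llvm.spv.cmpxchg.p0i32(i32* %ptr, i32 %cmp, i32 %new,
//            i32 <scope>, i32 <success semantics>, i32 <failure semantics>)
// whose aggregate users are then rewired by replaceMemInstrUses().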
|
||||
|
||||
void SPIRVEmitIntrinsics::processGlobalValue(GlobalVariable &GV) {
|
||||
// Skip special artificial variable llvm.global.annotations.
|
||||
if (GV.getName() == "llvm.global.annotations")
|
||||
|
@ -351,14 +369,13 @@ void SPIRVEmitIntrinsics::insertAssignTypeIntrs(Instruction *I) {
|
|||
// Check GetElementPtrConstantExpr case.
|
||||
(isa<ConstantExpr>(Op) && isa<GEPOperator>(Op))) {
|
||||
IRB->SetInsertPoint(I);
|
||||
if (isa<UndefValue>(Op) && Op->getType()->isAggregateType())
|
||||
buildIntrWithMD(Intrinsic::spv_assign_type, {IRB->getInt32Ty()}, Op,
|
||||
UndefValue::get(IRB->getInt32Ty()));
|
||||
else
|
||||
buildIntrWithMD(Intrinsic::spv_assign_type, {Op->getType()}, Op, Op);
|
||||
}
|
||||
}
|
||||
// StoreInst's operand type can be changed in the next stage so we need to
|
||||
// store it in the set.
|
||||
if (isa<StoreInst>(I) &&
|
||||
cast<StoreInst>(I)->getValueOperand()->getType()->isAggregateType())
|
||||
AggrStores.insert(I);
|
||||
}
|
||||
|
||||
void SPIRVEmitIntrinsics::processInstrAfterVisit(Instruction *I) {
|
||||
|
@ -378,7 +395,7 @@ void SPIRVEmitIntrinsics::processInstrAfterVisit(Instruction *I) {
|
|||
if ((isa<ConstantAggregateZero>(Op) && Op->getType()->isVectorTy()) ||
|
||||
isa<PHINode>(I) || isa<SwitchInst>(I))
|
||||
TrackConstants = false;
|
||||
if (isa<ConstantData>(Op) && TrackConstants) {
|
||||
if ((isa<ConstantData>(Op) || isa<ConstantExpr>(Op)) && TrackConstants) {
|
||||
unsigned OpNo = Op.getOperandNo();
|
||||
if (II && ((II->getIntrinsicID() == Intrinsic::spv_gep && OpNo == 0) ||
|
||||
(II->paramHasAttr(OpNo, Attribute::ImmArg))))
|
||||
|
@ -405,8 +422,20 @@ bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
|
|||
AggrConsts.clear();
|
||||
AggrStores.clear();
|
||||
|
||||
IRB->SetInsertPoint(&Func.getEntryBlock().front());
|
||||
// StoreInst's operand type can be changed during the next transformations,
|
||||
// so we need to store it in the set. Also store already transformed types.
|
||||
for (auto &I : instructions(Func)) {
|
||||
StoreInst *SI = dyn_cast<StoreInst>(&I);
|
||||
if (!SI)
|
||||
continue;
|
||||
Type *ElTy = SI->getValueOperand()->getType();
|
||||
PointerType *PTy = cast<PointerType>(SI->getOperand(1)->getType());
|
||||
if (ElTy->isAggregateType() || ElTy->isVectorTy() ||
|
||||
!PTy->isOpaqueOrPointeeTypeMatches(ElTy))
|
||||
AggrStores.insert(&I);
|
||||
}
|
||||
|
||||
IRB->SetInsertPoint(&Func.getEntryBlock().front());
|
||||
for (auto &GV : Func.getParent()->globals())
|
||||
processGlobalValue(GV);
|
||||
|
||||
|
|
|
@ -24,6 +24,24 @@ using namespace llvm;
|
|||
SPIRVGlobalRegistry::SPIRVGlobalRegistry(unsigned PointerSize)
|
||||
: PointerSize(PointerSize) {}
|
||||
|
||||
SPIRVType *SPIRVGlobalRegistry::assignIntTypeToVReg(unsigned BitWidth,
|
||||
Register VReg,
|
||||
MachineInstr &I,
|
||||
const SPIRVInstrInfo &TII) {
|
||||
SPIRVType *SpirvType = getOrCreateSPIRVIntegerType(BitWidth, I, TII);
|
||||
assignSPIRVTypeToVReg(SpirvType, VReg, *CurMF);
|
||||
return SpirvType;
|
||||
}
|
||||
|
||||
SPIRVType *SPIRVGlobalRegistry::assignVectTypeToVReg(
|
||||
SPIRVType *BaseType, unsigned NumElements, Register VReg, MachineInstr &I,
|
||||
const SPIRVInstrInfo &TII) {
|
||||
SPIRVType *SpirvType =
|
||||
getOrCreateSPIRVVectorType(BaseType, NumElements, I, TII);
|
||||
assignSPIRVTypeToVReg(SpirvType, VReg, *CurMF);
|
||||
return SpirvType;
|
||||
}
|
||||
|
||||
SPIRVType *SPIRVGlobalRegistry::assignTypeToVReg(
|
||||
const Type *Type, Register VReg, MachineIRBuilder &MIRBuilder,
|
||||
SPIRV::AccessQualifier AccessQual, bool EmitIR) {
|
||||
|
@ -96,6 +114,65 @@ SPIRVType *SPIRVGlobalRegistry::getOpTypeVector(uint32_t NumElems,
|
|||
return MIB;
|
||||
}
|
||||
|
||||
std::tuple<Register, ConstantInt *, bool>
|
||||
SPIRVGlobalRegistry::getOrCreateConstIntReg(uint64_t Val, SPIRVType *SpvType,
|
||||
MachineIRBuilder *MIRBuilder,
|
||||
MachineInstr *I,
|
||||
const SPIRVInstrInfo *TII) {
|
||||
const IntegerType *LLVMIntTy;
|
||||
if (SpvType)
|
||||
LLVMIntTy = cast<IntegerType>(getTypeForSPIRVType(SpvType));
|
||||
else
|
||||
LLVMIntTy = IntegerType::getInt32Ty(CurMF->getFunction().getContext());
|
||||
bool NewInstr = false;
|
||||
// Find a constant in DT or build a new one.
|
||||
ConstantInt *CI = ConstantInt::get(const_cast<IntegerType *>(LLVMIntTy), Val);
|
||||
Register Res = DT.find(CI, CurMF);
|
||||
if (!Res.isValid()) {
|
||||
unsigned BitWidth = SpvType ? getScalarOrVectorBitWidth(SpvType) : 32;
|
||||
LLT LLTy = LLT::scalar(32);
|
||||
Res = CurMF->getRegInfo().createGenericVirtualRegister(LLTy);
|
||||
if (MIRBuilder)
|
||||
assignTypeToVReg(LLVMIntTy, Res, *MIRBuilder);
|
||||
else
|
||||
assignIntTypeToVReg(BitWidth, Res, *I, *TII);
|
||||
DT.add(CI, CurMF, Res);
|
||||
NewInstr = true;
|
||||
}
|
||||
return std::make_tuple(Res, CI, NewInstr);
|
||||
}
|
||||
|
||||
Register SPIRVGlobalRegistry::getOrCreateConstInt(uint64_t Val, MachineInstr &I,
|
||||
SPIRVType *SpvType,
|
||||
const SPIRVInstrInfo &TII) {
|
||||
assert(SpvType);
|
||||
ConstantInt *CI;
|
||||
Register Res;
|
||||
bool New;
|
||||
std::tie(Res, CI, New) =
|
||||
getOrCreateConstIntReg(Val, SpvType, nullptr, &I, &TII);
|
||||
// If we have found Res register which is defined by the passed G_CONSTANT
|
||||
// machine instruction, a new constant instruction should be created.
|
||||
if (!New && (!I.getOperand(0).isReg() || Res != I.getOperand(0).getReg()))
|
||||
return Res;
|
||||
MachineInstrBuilder MIB;
|
||||
MachineBasicBlock &BB = *I.getParent();
|
||||
if (Val) {
|
||||
MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
|
||||
.addDef(Res)
|
||||
.addUse(getSPIRVTypeID(SpvType));
|
||||
addNumImm(APInt(getScalarOrVectorBitWidth(SpvType), Val), MIB);
|
||||
} else {
|
||||
MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
|
||||
.addDef(Res)
|
||||
.addUse(getSPIRVTypeID(SpvType));
|
||||
}
|
||||
const auto &ST = CurMF->getSubtarget();
|
||||
constrainSelectedInstRegOperands(*MIB, *ST.getInstrInfo(),
|
||||
*ST.getRegisterInfo(), *ST.getRegBankInfo());
|
||||
return Res;
|
||||
}
|
||||
|
||||
Register SPIRVGlobalRegistry::buildConstantInt(uint64_t Val,
|
||||
MachineIRBuilder &MIRBuilder,
|
||||
SPIRVType *SpvType,
|
||||
|
@ -112,14 +189,32 @@ Register SPIRVGlobalRegistry::buildConstantInt(uint64_t Val,
|
|||
Register Res = DT.find(ConstInt, &MF);
|
||||
if (!Res.isValid()) {
|
||||
unsigned BitWidth = SpvType ? getScalarOrVectorBitWidth(SpvType) : 32;
|
||||
Res = MF.getRegInfo().createGenericVirtualRegister(LLT::scalar(BitWidth));
|
||||
assignTypeToVReg(LLVMIntTy, Res, MIRBuilder);
|
||||
if (EmitIR)
|
||||
LLT LLTy = LLT::scalar(EmitIR ? BitWidth : 32);
|
||||
Res = MF.getRegInfo().createGenericVirtualRegister(LLTy);
|
||||
assignTypeToVReg(LLVMIntTy, Res, MIRBuilder,
|
||||
SPIRV::AccessQualifier::ReadWrite, EmitIR);
|
||||
DT.add(ConstInt, &MIRBuilder.getMF(), Res);
|
||||
if (EmitIR) {
|
||||
MIRBuilder.buildConstant(Res, *ConstInt);
|
||||
else
|
||||
MIRBuilder.buildInstr(SPIRV::OpConstantI)
|
||||
} else {
|
||||
MachineInstrBuilder MIB;
|
||||
if (Val) {
|
||||
assert(SpvType);
|
||||
MIB = MIRBuilder.buildInstr(SPIRV::OpConstantI)
|
||||
.addDef(Res)
|
||||
.addImm(ConstInt->getSExtValue());
|
||||
.addUse(getSPIRVTypeID(SpvType));
|
||||
addNumImm(APInt(BitWidth, Val), MIB);
|
||||
} else {
|
||||
assert(SpvType);
|
||||
MIB = MIRBuilder.buildInstr(SPIRV::OpConstantNull)
|
||||
.addDef(Res)
|
||||
.addUse(getSPIRVTypeID(SpvType));
|
||||
}
|
||||
const auto &Subtarget = CurMF->getSubtarget();
|
||||
constrainSelectedInstRegOperands(*MIB, *Subtarget.getInstrInfo(),
|
||||
*Subtarget.getRegisterInfo(),
|
||||
*Subtarget.getRegBankInfo());
|
||||
}
|
||||
}
|
||||
return Res;
|
||||
}
|
||||
|
@ -142,11 +237,63 @@ Register SPIRVGlobalRegistry::buildConstantFP(APFloat Val,
|
|||
unsigned BitWidth = SpvType ? getScalarOrVectorBitWidth(SpvType) : 32;
|
||||
Res = MF.getRegInfo().createGenericVirtualRegister(LLT::scalar(BitWidth));
|
||||
assignTypeToVReg(LLVMFPTy, Res, MIRBuilder);
|
||||
DT.add(ConstFP, &MF, Res);
|
||||
MIRBuilder.buildFConstant(Res, *ConstFP);
|
||||
}
|
||||
return Res;
|
||||
}
|
||||
|
||||
Register
|
||||
SPIRVGlobalRegistry::getOrCreateConsIntVector(uint64_t Val, MachineInstr &I,
|
||||
SPIRVType *SpvType,
|
||||
const SPIRVInstrInfo &TII) {
|
||||
const Type *LLVMTy = getTypeForSPIRVType(SpvType);
|
||||
assert(LLVMTy->isVectorTy());
|
||||
const FixedVectorType *LLVMVecTy = cast<FixedVectorType>(LLVMTy);
|
||||
Type *LLVMBaseTy = LLVMVecTy->getElementType();
|
||||
// Find a constant vector in DT or build a new one.
|
||||
const auto ConstInt = ConstantInt::get(LLVMBaseTy, Val);
|
||||
auto ConstVec =
|
||||
ConstantVector::getSplat(LLVMVecTy->getElementCount(), ConstInt);
|
||||
Register Res = DT.find(ConstVec, CurMF);
|
||||
if (!Res.isValid()) {
|
||||
unsigned BitWidth = getScalarOrVectorBitWidth(SpvType);
|
||||
SPIRVType *SpvBaseType = getOrCreateSPIRVIntegerType(BitWidth, I, TII);
|
||||
// SpvScalConst should be created before SpvVecConst to avoid undefined ID
|
||||
// error on validation.
|
||||
// TODO: can be moved below once sorting of types/consts/defs is implemented.
|
||||
Register SpvScalConst;
|
||||
if (Val)
|
||||
SpvScalConst = getOrCreateConstInt(Val, I, SpvBaseType, TII);
|
||||
// TODO: maybe use bitwidth of base type.
|
||||
LLT LLTy = LLT::scalar(32);
|
||||
Register SpvVecConst =
|
||||
CurMF->getRegInfo().createGenericVirtualRegister(LLTy);
|
||||
const unsigned ElemCnt = SpvType->getOperand(2).getImm();
|
||||
assignVectTypeToVReg(SpvBaseType, ElemCnt, SpvVecConst, I, TII);
|
||||
DT.add(ConstVec, CurMF, SpvVecConst);
|
||||
MachineInstrBuilder MIB;
|
||||
MachineBasicBlock &BB = *I.getParent();
|
||||
if (Val) {
|
||||
MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantComposite))
|
||||
.addDef(SpvVecConst)
|
||||
.addUse(getSPIRVTypeID(SpvType));
|
||||
for (unsigned i = 0; i < ElemCnt; ++i)
|
||||
MIB.addUse(SpvScalConst);
|
||||
} else {
|
||||
MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
|
||||
.addDef(SpvVecConst)
|
||||
.addUse(getSPIRVTypeID(SpvType));
|
||||
}
|
||||
const auto &Subtarget = CurMF->getSubtarget();
|
||||
constrainSelectedInstRegOperands(*MIB, *Subtarget.getInstrInfo(),
|
||||
*Subtarget.getRegisterInfo(),
|
||||
*Subtarget.getRegBankInfo());
|
||||
return SpvVecConst;
|
||||
}
|
||||
return Res;
|
||||
}
|
||||
|
||||
Register SPIRVGlobalRegistry::buildGlobalVariable(
|
||||
Register ResVReg, SPIRVType *BaseType, StringRef Name,
|
||||
const GlobalValue *GV, SPIRV::StorageClass Storage,
|
||||
|
@ -169,7 +316,13 @@ Register SPIRVGlobalRegistry::buildGlobalVariable(
|
|||
}
|
||||
GV = GVar;
|
||||
}
|
||||
Register Reg;
|
||||
Register Reg = DT.find(GVar, &MIRBuilder.getMF());
|
||||
if (Reg.isValid()) {
|
||||
if (Reg != ResVReg)
|
||||
MIRBuilder.buildCopy(ResVReg, Reg);
|
||||
return ResVReg;
|
||||
}
|
||||
|
||||
auto MIB = MIRBuilder.buildInstr(SPIRV::OpVariable)
|
||||
.addDef(ResVReg)
|
||||
.addUse(getSPIRVTypeID(BaseType))
|
||||
|
@ -234,14 +387,76 @@ SPIRVType *SPIRVGlobalRegistry::getOpTypeArray(uint32_t NumElems,
|
|||
return MIB;
|
||||
}
|
||||
|
||||
SPIRVType *SPIRVGlobalRegistry::getOpTypeOpaque(const StructType *Ty,
|
||||
MachineIRBuilder &MIRBuilder) {
|
||||
assert(Ty->hasName());
|
||||
const StringRef Name = Ty->hasName() ? Ty->getName() : "";
|
||||
Register ResVReg = createTypeVReg(MIRBuilder);
|
||||
auto MIB = MIRBuilder.buildInstr(SPIRV::OpTypeOpaque).addDef(ResVReg);
|
||||
addStringImm(Name, MIB);
|
||||
buildOpName(ResVReg, Name, MIRBuilder);
|
||||
return MIB;
|
||||
}
|
||||
|
||||
SPIRVType *SPIRVGlobalRegistry::getOpTypeStruct(const StructType *Ty,
|
||||
MachineIRBuilder &MIRBuilder,
|
||||
bool EmitIR) {
|
||||
SmallVector<Register, 4> FieldTypes;
|
||||
for (const auto &Elem : Ty->elements()) {
|
||||
SPIRVType *ElemTy = findSPIRVType(Elem, MIRBuilder);
|
||||
assert(ElemTy && ElemTy->getOpcode() != SPIRV::OpTypeVoid &&
|
||||
"Invalid struct element type");
|
||||
FieldTypes.push_back(getSPIRVTypeID(ElemTy));
|
||||
}
|
||||
Register ResVReg = createTypeVReg(MIRBuilder);
|
||||
auto MIB = MIRBuilder.buildInstr(SPIRV::OpTypeStruct).addDef(ResVReg);
|
||||
for (const auto &Ty : FieldTypes)
|
||||
MIB.addUse(Ty);
|
||||
if (Ty->hasName())
|
||||
buildOpName(ResVReg, Ty->getName(), MIRBuilder);
|
||||
if (Ty->isPacked())
|
||||
buildOpDecorate(ResVReg, MIRBuilder, SPIRV::Decoration::CPacked, {});
|
||||
return MIB;
|
||||
}
|
||||
|
||||
static bool isOpenCLBuiltinType(const StructType *SType) {
|
||||
return SType->isOpaque() && SType->hasName() &&
|
||||
SType->getName().startswith("opencl.");
|
||||
}
|
||||
|
||||
static bool isSPIRVBuiltinType(const StructType *SType) {
|
||||
return SType->isOpaque() && SType->hasName() &&
|
||||
SType->getName().startswith("spirv.");
|
||||
}
|
||||
|
||||
static bool isSpecialType(const Type *Ty) {
|
||||
if (auto PType = dyn_cast<PointerType>(Ty)) {
|
||||
if (!PType->isOpaque())
|
||||
Ty = PType->getNonOpaquePointerElementType();
|
||||
}
|
||||
if (auto SType = dyn_cast<StructType>(Ty))
|
||||
return isOpenCLBuiltinType(SType) || isSPIRVBuiltinType(SType);
|
||||
return false;
|
||||
}
|
||||
|
||||
SPIRVType *SPIRVGlobalRegistry::getOpTypePointer(SPIRV::StorageClass SC,
|
||||
SPIRVType *ElemType,
|
||||
MachineIRBuilder &MIRBuilder) {
|
||||
auto MIB = MIRBuilder.buildInstr(SPIRV::OpTypePointer)
|
||||
.addDef(createTypeVReg(MIRBuilder))
|
||||
MachineIRBuilder &MIRBuilder,
|
||||
Register Reg) {
|
||||
if (!Reg.isValid())
|
||||
Reg = createTypeVReg(MIRBuilder);
|
||||
return MIRBuilder.buildInstr(SPIRV::OpTypePointer)
|
||||
.addDef(Reg)
|
||||
.addImm(static_cast<uint32_t>(SC))
|
||||
.addUse(getSPIRVTypeID(ElemType));
|
||||
return MIB;
|
||||
}
|
||||
|
||||
SPIRVType *
|
||||
SPIRVGlobalRegistry::getOpTypeForwardPointer(SPIRV::StorageClass SC,
|
||||
MachineIRBuilder &MIRBuilder) {
|
||||
return MIRBuilder.buildInstr(SPIRV::OpTypeForwardPointer)
|
||||
.addUse(createTypeVReg(MIRBuilder))
|
||||
.addImm(static_cast<uint32_t>(SC));
|
||||
}
|
||||
|
||||
SPIRVType *SPIRVGlobalRegistry::getOpTypeFunction(
|
||||
|
@ -255,10 +470,49 @@ SPIRVType *SPIRVGlobalRegistry::getOpTypeFunction(
|
|||
return MIB;
|
||||
}
|
||||
|
||||
SPIRVType *SPIRVGlobalRegistry::getOrCreateOpTypeFunctionWithArgs(
|
||||
const Type *Ty, SPIRVType *RetType,
|
||||
const SmallVectorImpl<SPIRVType *> &ArgTypes,
|
||||
MachineIRBuilder &MIRBuilder) {
|
||||
Register Reg = DT.find(Ty, &MIRBuilder.getMF());
|
||||
if (Reg.isValid())
|
||||
return getSPIRVTypeForVReg(Reg);
|
||||
SPIRVType *SpirvType = getOpTypeFunction(RetType, ArgTypes, MIRBuilder);
|
||||
return finishCreatingSPIRVType(Ty, SpirvType);
|
||||
}
|
||||
|
||||
SPIRVType *SPIRVGlobalRegistry::findSPIRVType(const Type *Ty,
|
||||
MachineIRBuilder &MIRBuilder,
|
||||
SPIRV::AccessQualifier AccQual,
|
||||
bool EmitIR) {
|
||||
Register Reg = DT.find(Ty, &MIRBuilder.getMF());
|
||||
if (Reg.isValid())
|
||||
return getSPIRVTypeForVReg(Reg);
|
||||
if (ForwardPointerTypes.find(Ty) != ForwardPointerTypes.end())
|
||||
return ForwardPointerTypes[Ty];
|
||||
return restOfCreateSPIRVType(Ty, MIRBuilder, AccQual, EmitIR);
|
||||
}
|
||||
|
||||
Register SPIRVGlobalRegistry::getSPIRVTypeID(const SPIRVType *SpirvType) const {
|
||||
assert(SpirvType && "Attempting to get type id for nullptr type.");
|
||||
if (SpirvType->getOpcode() == SPIRV::OpTypeForwardPointer)
|
||||
return SpirvType->uses().begin()->getReg();
|
||||
return SpirvType->defs().begin()->getReg();
|
||||
}
|
||||
|
||||
SPIRVType *SPIRVGlobalRegistry::createSPIRVType(const Type *Ty,
|
||||
MachineIRBuilder &MIRBuilder,
|
||||
SPIRV::AccessQualifier AccQual,
|
||||
bool EmitIR) {
|
||||
assert(!isSpecialType(Ty));
|
||||
auto &TypeToSPIRVTypeMap = DT.getTypes()->getAllUses();
|
||||
auto t = TypeToSPIRVTypeMap.find(Ty);
|
||||
if (t != TypeToSPIRVTypeMap.end()) {
|
||||
auto tt = t->second.find(&MIRBuilder.getMF());
|
||||
if (tt != t->second.end())
|
||||
return getSPIRVTypeForVReg(tt->second);
|
||||
}
|
||||
|
||||
if (auto IType = dyn_cast<IntegerType>(Ty)) {
|
||||
const unsigned Width = IType->getBitWidth();
|
||||
return Width == 1 ? getOpTypeBool(MIRBuilder)
|
||||
|
@ -269,21 +523,25 @@ SPIRVType *SPIRVGlobalRegistry::createSPIRVType(const Type *Ty,
|
|||
if (Ty->isVoidTy())
|
||||
return getOpTypeVoid(MIRBuilder);
|
||||
if (Ty->isVectorTy()) {
|
||||
auto El = getOrCreateSPIRVType(cast<FixedVectorType>(Ty)->getElementType(),
|
||||
MIRBuilder);
|
||||
SPIRVType *El =
|
||||
findSPIRVType(cast<FixedVectorType>(Ty)->getElementType(), MIRBuilder);
|
||||
return getOpTypeVector(cast<FixedVectorType>(Ty)->getNumElements(), El,
|
||||
MIRBuilder);
|
||||
}
|
||||
if (Ty->isArrayTy()) {
|
||||
auto *El = getOrCreateSPIRVType(Ty->getArrayElementType(), MIRBuilder);
|
||||
SPIRVType *El = findSPIRVType(Ty->getArrayElementType(), MIRBuilder);
|
||||
return getOpTypeArray(Ty->getArrayNumElements(), El, MIRBuilder, EmitIR);
|
||||
}
|
||||
assert(!isa<StructType>(Ty) && "Unsupported StructType");
|
||||
if (auto SType = dyn_cast<StructType>(Ty)) {
|
||||
if (SType->isOpaque())
|
||||
return getOpTypeOpaque(SType, MIRBuilder);
|
||||
return getOpTypeStruct(SType, MIRBuilder, EmitIR);
|
||||
}
|
||||
if (auto FType = dyn_cast<FunctionType>(Ty)) {
|
||||
SPIRVType *RetTy = getOrCreateSPIRVType(FType->getReturnType(), MIRBuilder);
|
||||
SPIRVType *RetTy = findSPIRVType(FType->getReturnType(), MIRBuilder);
|
||||
SmallVector<SPIRVType *, 4> ParamTypes;
|
||||
for (const auto &t : FType->params()) {
|
||||
ParamTypes.push_back(getOrCreateSPIRVType(t, MIRBuilder));
|
||||
ParamTypes.push_back(findSPIRVType(t, MIRBuilder));
|
||||
}
|
||||
return getOpTypeFunction(RetTy, ParamTypes, MIRBuilder);
|
||||
}
|
||||
|
@ -292,24 +550,51 @@ SPIRVType *SPIRVGlobalRegistry::createSPIRVType(const Type *Ty,
|
|||
// At the moment, all opaque pointers correspond to i8 element type.
|
||||
// TODO: change the implementation once opaque pointers are supported
|
||||
// in the SPIR-V specification.
|
||||
if (PType->isOpaque()) {
|
||||
if (PType->isOpaque())
|
||||
SpvElementType = getOrCreateSPIRVIntegerType(8, MIRBuilder);
|
||||
} else {
|
||||
Type *ElemType = PType->getNonOpaquePointerElementType();
|
||||
// TODO: support OpenCL and SPIRV builtins like image2d_t that are passed
|
||||
// as pointers, but should be treated as custom types like OpTypeImage.
|
||||
assert(!isa<StructType>(ElemType) && "Unsupported StructType pointer");
|
||||
|
||||
// Otherwise, treat it as a regular pointer type.
|
||||
SpvElementType = getOrCreateSPIRVType(
|
||||
ElemType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, EmitIR);
|
||||
}
|
||||
else
|
||||
SpvElementType =
|
||||
findSPIRVType(PType->getNonOpaquePointerElementType(), MIRBuilder,
|
||||
SPIRV::AccessQualifier::ReadWrite, EmitIR);
|
||||
auto SC = addressSpaceToStorageClass(PType->getAddressSpace());
|
||||
return getOpTypePointer(SC, SpvElementType, MIRBuilder);
|
||||
// Null pointer means we have a loop in type definitions, make and
|
||||
// return corresponding OpTypeForwardPointer.
|
||||
if (SpvElementType == nullptr) {
|
||||
if (ForwardPointerTypes.find(Ty) == ForwardPointerTypes.end())
|
||||
ForwardPointerTypes[PType] = getOpTypeForwardPointer(SC, MIRBuilder);
|
||||
return ForwardPointerTypes[PType];
|
||||
}
|
||||
Register Reg(0);
|
||||
// If we have forward pointer associated with this type, use its register
|
||||
// operand to create OpTypePointer.
|
||||
if (ForwardPointerTypes.find(PType) != ForwardPointerTypes.end())
|
||||
Reg = getSPIRVTypeID(ForwardPointerTypes[PType]);
|
||||
|
||||
return getOpTypePointer(SC, SpvElementType, MIRBuilder, Reg);
|
||||
}
|
||||
llvm_unreachable("Unable to convert LLVM type to SPIRVType");
|
||||
}
|
||||
|
||||
SPIRVType *SPIRVGlobalRegistry::restOfCreateSPIRVType(
|
||||
const Type *Ty, MachineIRBuilder &MIRBuilder,
|
||||
SPIRV::AccessQualifier AccessQual, bool EmitIR) {
|
||||
if (TypesInProcessing.count(Ty) && !Ty->isPointerTy())
|
||||
return nullptr;
|
||||
TypesInProcessing.insert(Ty);
|
||||
SPIRVType *SpirvType = createSPIRVType(Ty, MIRBuilder, AccessQual, EmitIR);
|
||||
TypesInProcessing.erase(Ty);
|
||||
VRegToTypeMap[&MIRBuilder.getMF()][getSPIRVTypeID(SpirvType)] = SpirvType;
|
||||
SPIRVToLLVMType[SpirvType] = Ty;
|
||||
Register Reg = DT.find(Ty, &MIRBuilder.getMF());
|
||||
// Do not add OpTypeForwardPointer to DT, a corresponding normal pointer type
|
||||
// will be added later. For special types it is already added to DT.
|
||||
if (SpirvType->getOpcode() != SPIRV::OpTypeForwardPointer && !Reg.isValid() &&
|
||||
!isSpecialType(Ty))
|
||||
DT.add(Ty, &MIRBuilder.getMF(), getSPIRVTypeID(SpirvType));
|
||||
|
||||
return SpirvType;
|
||||
}
|
||||
|
||||
SPIRVType *SPIRVGlobalRegistry::getSPIRVTypeForVReg(Register VReg) const {
|
||||
auto t = VRegToTypeMap.find(CurMF);
|
||||
if (t != VRegToTypeMap.end()) {
|
||||
|
@ -321,13 +606,26 @@ SPIRVType *SPIRVGlobalRegistry::getSPIRVTypeForVReg(Register VReg) const {
|
|||
}
|
||||
|
||||
SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVType(
|
||||
const Type *Type, MachineIRBuilder &MIRBuilder,
|
||||
const Type *Ty, MachineIRBuilder &MIRBuilder,
|
||||
SPIRV::AccessQualifier AccessQual, bool EmitIR) {
Register Reg = DT.find(Type, &MIRBuilder.getMF());
Register Reg = DT.find(Ty, &MIRBuilder.getMF());
if (Reg.isValid())
return getSPIRVTypeForVReg(Reg);
SPIRVType *SpirvType = createSPIRVType(Type, MIRBuilder, AccessQual, EmitIR);
return restOfCreateSPIRVType(Type, SpirvType);
TypesInProcessing.clear();
SPIRVType *STy = restOfCreateSPIRVType(Ty, MIRBuilder, AccessQual, EmitIR);
// Create normal pointer types for the corresponding OpTypeForwardPointers.
for (auto &CU : ForwardPointerTypes) {
const Type *Ty2 = CU.first;
SPIRVType *STy2 = CU.second;
if ((Reg = DT.find(Ty2, &MIRBuilder.getMF())).isValid())
STy2 = getSPIRVTypeForVReg(Reg);
else
STy2 = restOfCreateSPIRVType(Ty2, MIRBuilder, AccessQual, EmitIR);
if (Ty == Ty2)
STy = STy2;
}
ForwardPointerTypes.clear();
return STy;
}

bool SPIRVGlobalRegistry::isScalarOfType(Register VReg,

@@ -393,7 +691,7 @@ SPIRVGlobalRegistry::getOrCreateSPIRVIntegerType(unsigned BitWidth,
MIRBuilder);
}

SPIRVType *SPIRVGlobalRegistry::restOfCreateSPIRVType(const Type *LLVMTy,
SPIRVType *SPIRVGlobalRegistry::finishCreatingSPIRVType(const Type *LLVMTy,
SPIRVType *SpirvType) {
assert(CurMF == SpirvType->getMF());
VRegToTypeMap[CurMF][getSPIRVTypeID(SpirvType)] = SpirvType;

@@ -413,7 +711,7 @@ SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVIntegerType(
.addDef(createTypeVReg(CurMF->getRegInfo()))
.addImm(BitWidth)
.addImm(0);
return restOfCreateSPIRVType(LLVMTy, MIB);
return finishCreatingSPIRVType(LLVMTy, MIB);
}

SPIRVType *

@@ -423,6 +721,19 @@ SPIRVGlobalRegistry::getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder) {
MIRBuilder);
}

SPIRVType *
SPIRVGlobalRegistry::getOrCreateSPIRVBoolType(MachineInstr &I,
const SPIRVInstrInfo &TII) {
Type *LLVMTy = IntegerType::get(CurMF->getFunction().getContext(), 1);
Register Reg = DT.find(LLVMTy, CurMF);
if (Reg.isValid())
return getSPIRVTypeForVReg(Reg);
MachineBasicBlock &BB = *I.getParent();
auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpTypeBool))
.addDef(createTypeVReg(CurMF->getRegInfo()));
return finishCreatingSPIRVType(LLVMTy, MIB);
}

SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVVectorType(
SPIRVType *BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder) {
return getOrCreateSPIRVType(

@@ -436,12 +747,15 @@ SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVVectorType(
const SPIRVInstrInfo &TII) {
Type *LLVMTy = FixedVectorType::get(
const_cast<Type *>(getTypeForSPIRVType(BaseType)), NumElements);
Register Reg = DT.find(LLVMTy, CurMF);
if (Reg.isValid())
return getSPIRVTypeForVReg(Reg);
MachineBasicBlock &BB = *I.getParent();
auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpTypeVector))
.addDef(createTypeVReg(CurMF->getRegInfo()))
.addUse(getSPIRVTypeID(BaseType))
.addImm(NumElements);
return restOfCreateSPIRVType(LLVMTy, MIB);
return finishCreatingSPIRVType(LLVMTy, MIB);
}

SPIRVType *

@@ -460,10 +774,39 @@ SPIRVType *SPIRVGlobalRegistry::getOrCreateSPIRVPointerType(
Type *LLVMTy =
PointerType::get(const_cast<Type *>(getTypeForSPIRVType(BaseType)),
storageClassToAddressSpace(SC));
Register Reg = DT.find(LLVMTy, CurMF);
if (Reg.isValid())
return getSPIRVTypeForVReg(Reg);
MachineBasicBlock &BB = *I.getParent();
auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpTypePointer))
.addDef(createTypeVReg(CurMF->getRegInfo()))
.addImm(static_cast<uint32_t>(SC))
.addUse(getSPIRVTypeID(BaseType));
return restOfCreateSPIRVType(LLVMTy, MIB);
return finishCreatingSPIRVType(LLVMTy, MIB);
}

Register SPIRVGlobalRegistry::getOrCreateUndef(MachineInstr &I,
SPIRVType *SpvType,
const SPIRVInstrInfo &TII) {
assert(SpvType);
const Type *LLVMTy = getTypeForSPIRVType(SpvType);
assert(LLVMTy);
// Find a constant in DT or build a new one.
UndefValue *UV = UndefValue::get(const_cast<Type *>(LLVMTy));
Register Res = DT.find(UV, CurMF);
if (Res.isValid())
return Res;
LLT LLTy = LLT::scalar(32);
Res = CurMF->getRegInfo().createGenericVirtualRegister(LLTy);
assignSPIRVTypeToVReg(SpvType, Res, *CurMF);
DT.add(UV, CurMF, Res);

MachineInstrBuilder MIB;
MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
.addDef(Res)
.addUse(getSPIRVTypeID(SpvType));
const auto &ST = CurMF->getSubtarget();
constrainSelectedInstRegOperands(*MIB, *ST.getInstrInfo(),
*ST.getRegisterInfo(), *ST.getRegBankInfo());
return Res;
}

@@ -30,7 +30,7 @@ class SPIRVGlobalRegistry {
// Do not confuse this with DuplicatesTracker as DT maps Type* to <MF, Reg>
// where Reg = OpType...
// while VRegToTypeMap tracks SPIR-V type assigned to other regs (i.e. not
// type-declaring ones)
// type-declaring ones).
DenseMap<const MachineFunction *, DenseMap<Register, SPIRVType *>>
VRegToTypeMap;

@@ -38,6 +38,9 @@ class SPIRVGlobalRegistry {
DenseMap<SPIRVType *, const Type *> SPIRVToLLVMType;

SmallPtrSet<const Type *, 4> TypesInProcessing;
DenseMap<const Type *, SPIRVType *> ForwardPointerTypes;

// Number of bits pointers and size_t integers require.
const unsigned PointerSize;

@@ -46,6 +49,14 @@ class SPIRVGlobalRegistry {
createSPIRVType(const Type *Type, MachineIRBuilder &MIRBuilder,
SPIRV::AccessQualifier AQ = SPIRV::AccessQualifier::ReadWrite,
bool EmitIR = true);
SPIRVType *findSPIRVType(
const Type *Ty, MachineIRBuilder &MIRBuilder,
SPIRV::AccessQualifier accessQual = SPIRV::AccessQualifier::ReadWrite,
bool EmitIR = true);
SPIRVType *restOfCreateSPIRVType(const Type *Type,
MachineIRBuilder &MIRBuilder,
SPIRV::AccessQualifier AccessQual,
bool EmitIR);

public:
SPIRVGlobalRegistry(unsigned PointerSize);

@@ -91,6 +102,11 @@ public:
const Type *Type, Register VReg, MachineIRBuilder &MIRBuilder,
SPIRV::AccessQualifier AQ = SPIRV::AccessQualifier::ReadWrite,
bool EmitIR = true);
SPIRVType *assignIntTypeToVReg(unsigned BitWidth, Register VReg,
MachineInstr &I, const SPIRVInstrInfo &TII);
SPIRVType *assignVectTypeToVReg(SPIRVType *BaseType, unsigned NumElements,
Register VReg, MachineInstr &I,
const SPIRVInstrInfo &TII);

// In cases where the SPIR-V type is already known, this function can be
// used to map it to the given VReg via an ASSIGN_TYPE instruction.

@@ -123,10 +139,7 @@ public:
}

// Return the VReg holding the result of the given OpTypeXXX instruction.
Register getSPIRVTypeID(const SPIRVType *SpirvType) const {
assert(SpirvType && "Attempting to get type id for nullptr type.");
return SpirvType->defs().begin()->getReg();
}
Register getSPIRVTypeID(const SPIRVType *SpirvType) const;

void setCurrentFunc(MachineFunction &MF) { CurMF = &MF; }

@@ -167,19 +180,38 @@ private:
SPIRVType *getOpTypeArray(uint32_t NumElems, SPIRVType *ElemType,
MachineIRBuilder &MIRBuilder, bool EmitIR = true);

SPIRVType *getOpTypeOpaque(const StructType *Ty,
MachineIRBuilder &MIRBuilder);

SPIRVType *getOpTypeStruct(const StructType *Ty, MachineIRBuilder &MIRBuilder,
bool EmitIR = true);

SPIRVType *getOpTypePointer(SPIRV::StorageClass SC, SPIRVType *ElemType,
MachineIRBuilder &MIRBuilder, Register Reg);

SPIRVType *getOpTypeForwardPointer(SPIRV::StorageClass SC,
MachineIRBuilder &MIRBuilder);

SPIRVType *getOpTypeFunction(SPIRVType *RetType,
const SmallVectorImpl<SPIRVType *> &ArgTypes,
MachineIRBuilder &MIRBuilder);
SPIRVType *restOfCreateSPIRVType(const Type *LLVMTy, SPIRVType *SpirvType);
std::tuple<Register, ConstantInt *, bool> getOrCreateConstIntReg(
uint64_t Val, SPIRVType *SpvType, MachineIRBuilder *MIRBuilder,
MachineInstr *I = nullptr, const SPIRVInstrInfo *TII = nullptr);
SPIRVType *finishCreatingSPIRVType(const Type *LLVMTy, SPIRVType *SpirvType);

public:
Register buildConstantInt(uint64_t Val, MachineIRBuilder &MIRBuilder,
SPIRVType *SpvType = nullptr, bool EmitIR = true);
Register getOrCreateConstInt(uint64_t Val, MachineInstr &I,
SPIRVType *SpvType, const SPIRVInstrInfo &TII);
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder,
SPIRVType *SpvType = nullptr);
Register getOrCreateConsIntVector(uint64_t Val, MachineInstr &I,
SPIRVType *SpvType,
const SPIRVInstrInfo &TII);
Register getOrCreateUndef(MachineInstr &I, SPIRVType *SpvType,
const SPIRVInstrInfo &TII);
Register
buildGlobalVariable(Register Reg, SPIRVType *BaseType, StringRef Name,
const GlobalValue *GV, SPIRV::StorageClass Storage,

@@ -193,19 +225,24 @@ public:
SPIRVType *getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineInstr &I,
const SPIRVInstrInfo &TII);
SPIRVType *getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder);
SPIRVType *getOrCreateSPIRVBoolType(MachineInstr &I,
const SPIRVInstrInfo &TII);
SPIRVType *getOrCreateSPIRVVectorType(SPIRVType *BaseType,
unsigned NumElements,
MachineIRBuilder &MIRBuilder);
SPIRVType *getOrCreateSPIRVVectorType(SPIRVType *BaseType,
unsigned NumElements, MachineInstr &I,
const SPIRVInstrInfo &TII);

SPIRVType *getOrCreateSPIRVPointerType(
SPIRVType *BaseType, MachineIRBuilder &MIRBuilder,
SPIRV::StorageClass SClass = SPIRV::StorageClass::Function);
SPIRVType *getOrCreateSPIRVPointerType(
SPIRVType *BaseType, MachineInstr &I, const SPIRVInstrInfo &TII,
SPIRV::StorageClass SClass = SPIRV::StorageClass::Function);
SPIRVType *getOrCreateOpTypeFunctionWithArgs(
const Type *Ty, SPIRVType *RetType,
const SmallVectorImpl<SPIRVType *> &ArgTypes,
MachineIRBuilder &MIRBuilder);
};
} // end namespace llvm
#endif // LLLVM_LIB_TARGET_SPIRV_SPIRVTYPEMANAGER_H

@@ -52,7 +52,7 @@ bool SPIRVInstrInfo::isTypeDeclInstr(const MachineInstr &MI) const {
auto DefRegClass = MRI.getRegClassOrNull(MI.getOperand(0).getReg());
return DefRegClass && DefRegClass->getID() == SPIRV::TYPERegClass.getID();
} else {
return false;
return MI.getOpcode() == SPIRV::OpTypeForwardPointer;
}
}

@@ -193,3 +193,15 @@ void SPIRVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
auto &MRI = I->getMF()->getRegInfo();
MRI.replaceRegWith(DstOp.getReg(), SrcOp.getReg());
}

bool SPIRVInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
if (MI.getOpcode() == SPIRV::GET_ID || MI.getOpcode() == SPIRV::GET_fID ||
MI.getOpcode() == SPIRV::GET_pID || MI.getOpcode() == SPIRV::GET_vfID ||
MI.getOpcode() == SPIRV::GET_vID) {
auto &MRI = MI.getMF()->getRegInfo();
MRI.replaceRegWith(MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
MI.eraseFromParent();
return true;
}
return false;
}

@@ -48,6 +48,7 @@ public:
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
bool KillSrc) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
};
} // namespace llvm

@@ -449,6 +449,7 @@ def OpCopyLogical: UnOp<"OpCopyLogical", 400>;

def OpSNegate: UnOp<"OpSNegate", 126>;
def OpFNegate: UnOpTyped<"OpFNegate", 127, fID, fneg>;
def OpFNegateV: UnOpTyped<"OpFNegate", 127, vfID, fneg>;
defm OpIAdd: BinOpTypedGen<"OpIAdd", 128, add, 0, 1>;
defm OpFAdd: BinOpTypedGen<"OpFAdd", 129, fadd, 1, 1>;

@@ -618,8 +619,10 @@ def OpAtomicCompareExchange: Op<230, (outs ID:$res),
(ins TYPE:$ty, ID:$ptr, ID:$sc, ID:$eq,
ID:$neq, ID:$val, ID:$cmp),
"$res = OpAtomicCompareExchange $ty $ptr $sc $eq $neq $val $cmp">;
// TODO Currently the following deprecated opcode is missing:
// OpAtomicCompareExchangeWeak
def OpAtomicCompareExchangeWeak: Op<231, (outs ID:$res),
(ins TYPE:$ty, ID:$ptr, ID:$sc, ID:$eq,
ID:$neq, ID:$val, ID:$cmp),
"$res = OpAtomicCompareExchangeWeak $ty $ptr $sc $eq $neq $val $cmp">;

def OpAtomicIIncrement: AtomicOp<"OpAtomicIIncrement", 232>;
def OpAtomicIDecrement: AtomicOp<"OpAtomicIDecrement", 233>;

@@ -660,6 +663,11 @@ def OpMemoryNamedBarrier: Op<329, (outs), (ins ID:$barr, ID:$mem, ID:$sem),

// 3.42.21. Group and Subgroup Instructions

def OpGroupAsyncCopy: Op<259, (outs ID:$res), (ins TYPE:$ty, ID:$scope,
ID:$dst, ID:$src, ID:$nelts, ID:$stride, ID:$event),
"$res = OpGroupAsyncCopy $ty $scope $dst $src $nelts $stride $event">;
def OpGroupWaitEvents: Op<260, (outs), (ins ID:$scope, ID:$nelts, ID:$elist),
"OpGroupWaitEvents $scope $nelts $elist">;
def OpGroupAll: Op<261, (outs ID:$res), (ins TYPE:$ty, ID:$scope, ID:$pr),
"$res = OpGroupAll $ty $scope $pr">;
def OpGroupAny: Op<262, (outs ID:$res), (ins TYPE:$ty, ID:$scope, ID:$pr),

@@ -680,6 +688,18 @@ def OpGroupUMax: OpGroup<"UMax", 270>;
def OpGroupSMax: OpGroup<"SMax", 271>;

// TODO: 3.42.22. Device-Side Enqueue Instructions
def OpRetainEvent: Op<297, (outs), (ins ID:$event), "OpRetainEvent $event">;
def OpReleaseEvent: Op<298, (outs), (ins ID:$event), "OpReleaseEvent $event">;
def OpCreateUserEvent: Op<299, (outs ID:$res), (ins TYPE:$type),
"$res = OpCreateUserEvent $type">;
def OpIsValidEvent: Op<300, (outs ID:$res), (ins TYPE:$type, ID:$event),
"$res = OpIsValidEvent $type $event ">;
def OpSetUserEventStatus: Op<301, (outs), (ins ID:$event, ID:$status),
"OpSetUserEventStatus $event $status">;
def OpCaptureEventProfilingInfo: Op<302, (outs),
(ins ID:$event, ID:$info, ID:$value),
"OpCaptureEventProfilingInfo $event $info $value">;

// TODO: 3.42.23. Pipe Instructions

// 3.42.24. Non-Uniform Instructions

@@ -197,6 +197,8 @@ void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

@@ -335,6 +337,30 @@ bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
case TargetOpcode::G_ADDRSPACE_CAST:
return selectAddrSpaceCast(ResVReg, ResType, I);
case TargetOpcode::G_PTR_ADD: {
// Currently, we get G_PTR_ADD only as a result of translating
// global variables, initialized with constant expressions like GV + Const
// (see test opencl/basic/progvar_prog_scope_init.ll).
// TODO: extend the handler once we have other cases.
assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
Register GV = I.getOperand(1).getReg();
MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
(*II).getOpcode() == TargetOpcode::COPY ||
(*II).getOpcode() == SPIRV::OpVariable) &&
isImm(I.getOperand(2), MRI));
Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
MachineBasicBlock &BB = *I.getParent();
auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addImm(static_cast<uint32_t>(
SPIRV::Opcode::InBoundsPtrAccessChain))
.addUse(GV)
.addUse(Idx)
.addUse(I.getOperand(2).getReg());
return MIB.constrainAllUses(TII, TRI, RBI);
}

case TargetOpcode::G_ATOMICRMW_OR:
return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);

@@ -387,23 +413,6 @@ bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
Opcode);
}

static SPIRV::MemorySemantics getMemSemantics(AtomicOrdering Ord) {
switch (Ord) {
case AtomicOrdering::Acquire:
return SPIRV::MemorySemantics::Acquire;
case AtomicOrdering::Release:
return SPIRV::MemorySemantics::Release;
case AtomicOrdering::AcquireRelease:
return SPIRV::MemorySemantics::AcquireRelease;
case AtomicOrdering::SequentiallyConsistent:
return SPIRV::MemorySemantics::SequentiallyConsistent;
case AtomicOrdering::Unordered:
case AtomicOrdering::Monotonic:
case AtomicOrdering::NotAtomic:
return SPIRV::MemorySemantics::None;
}
}

static SPIRV::Scope getScope(SyncScope::ID Ord) {
switch (Ord) {
case SyncScope::SingleThread:

@@ -484,16 +493,15 @@ bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
MachineInstr &I) const {
MachineBasicBlock &BB = *I.getParent();
auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
.addDef(I.getOperand(0).getReg())
.addUse(I.getOperand(0).getReg())
.addUse(I.getOperand(1).getReg())
.addUse(I.getOperand(2).getReg());
if (I.getNumMemOperands())
addMemoryOperands(*I.memoperands_begin(), MIB);
bool Result = MIB.constrainAllUses(TII, TRI, RBI);
if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg()) {
if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
.addUse(MIB->getOperand(0).getReg());
}
return Result;
}

@@ -541,28 +549,39 @@ bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
const SPIRVType *ResType,
MachineInstr &I) const {
Register ScopeReg;
Register MemSemEqReg;
Register MemSemNeqReg;
Register Ptr = I.getOperand(2).getReg();
if (I.getOpcode() != TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS) {
assert(I.hasOneMemOperand());
const MachineMemOperand *MemOp = *I.memoperands_begin();
uint32_t Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
Register ScopeReg = buildI32Constant(Scope, I);
unsigned Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
ScopeReg = buildI32Constant(Scope, I);

unsigned ScSem = static_cast<uint32_t>(
getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
AtomicOrdering AO = MemOp->getSuccessOrdering();
unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
MemSemEqReg = buildI32Constant(MemSemEq, I);
AtomicOrdering FO = MemOp->getFailureOrdering();
unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
MemSemNeqReg =
MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
} else {
ScopeReg = I.getOperand(5).getReg();
MemSemEqReg = I.getOperand(6).getReg();
MemSemNeqReg = I.getOperand(7).getReg();
}

Register Ptr = I.getOperand(2).getReg();
Register Cmp = I.getOperand(3).getReg();
Register Val = I.getOperand(4).getReg();

SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
SPIRV::StorageClass SC = GR.getPointerStorageClass(Ptr);
uint32_t ScSem = static_cast<uint32_t>(getMemSemanticsForStorageClass(SC));
AtomicOrdering AO = MemOp->getSuccessOrdering();
uint32_t MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
Register MemSemEqReg = buildI32Constant(MemSemEq, I);
AtomicOrdering FO = MemOp->getFailureOrdering();
uint32_t MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
Register MemSemNeqReg =
MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass);
const DebugLoc &DL = I.getDebugLoc();
return BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
.addDef(ResVReg)
bool Result =
BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
.addDef(ACmpRes)
.addUse(GR.getSPIRVTypeID(SpvValTy))
.addUse(Ptr)
.addUse(ScopeReg)

@@ -571,6 +590,30 @@ bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
.addUse(Val)
.addUse(Cmp)
.constrainAllUses(TII, TRI, RBI);
Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
.addDef(CmpSuccReg)
.addUse(GR.getSPIRVTypeID(BoolTy))
.addUse(ACmpRes)
.addUse(Cmp)
.constrainAllUses(TII, TRI, RBI);
Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
.addDef(TmpReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(ACmpRes)
.addUse(GR.getOrCreateUndef(I, ResType, TII))
.addImm(0)
.constrainAllUses(TII, TRI, RBI);
Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(CmpSuccReg)
.addUse(TmpReg)
.addImm(1)
.constrainAllUses(TII, TRI, RBI);
return Result;
}
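
Note: the cmpxchg lowering above reproduces the {value, success} pair that LLVM's cmpxchg yields; OpAtomicCompareExchange returns the original memory contents, OpIEqual derives the success bit, and the two OpCompositeInsert instructions pack both into the result aggregate. A minimal C++ sketch of the scalar semantics being modeled, assuming an i32 payload (the struct and function names below are illustrative, not part of the backend):

#include <cstdint>

struct CmpXchgResult {
  uint32_t Value; // original contents, as returned by OpAtomicCompareExchange
  bool Success;   // OpIEqual(original, comparator)
};

// Scalar, non-atomic model of the emitted instruction sequence.
static CmpXchgResult modelCmpXchg(uint32_t &Location, uint32_t Cmp, uint32_t Val) {
  uint32_t Original = Location;
  if (Original == Cmp)
    Location = Val;                   // the exchange performed by the atomic op
  return {Original, Original == Cmp}; // packed via the two OpCompositeInsert ops
}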

static bool isGenericCastablePtr(SPIRV::StorageClass SC) {

@@ -592,6 +635,27 @@ static bool isGenericCastablePtr(SPIRV::StorageClass SC) {
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
const SPIRVType *ResType,
MachineInstr &I) const {
// If the AddrSpaceCast user is single and in OpConstantComposite or
// OpVariable, we should select OpSpecConstantOp.
auto UIs = MRI->use_instructions(ResVReg);
if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
(UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
UIs.begin()->getOpcode() == SPIRV::OpVariable ||
isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
Register NewReg = I.getOperand(1).getReg();
MachineBasicBlock &BB = *I.getParent();
SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
SPIRV::StorageClass::Generic);
bool Result =
BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
.addUse(NewReg)
.constrainAllUses(TII, TRI, RBI);
return Result;
}
Register SrcPtr = I.getOperand(1).getReg();
SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
SPIRV::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);

@@ -842,7 +906,9 @@ bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,

Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
MachineInstr &I) const {
return buildI32Constant(0, I, ResType);
if (ResType->getOpcode() == SPIRV::OpTypeVector)
return GR.getOrCreateConsIntVector(0, I, ResType, TII);
return GR.getOrCreateConstInt(0, I, ResType, TII);
}

Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,

@@ -851,20 +917,9 @@ Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
APInt One = AllOnes ? APInt::getAllOnesValue(BitWidth)
: APInt::getOneBitSet(BitWidth, 0);
Register OneReg = buildI32Constant(One.getZExtValue(), I, ResType);
if (ResType->getOpcode() == SPIRV::OpTypeVector) {
const unsigned NumEles = ResType->getOperand(2).getImm();
Register OneVec = MRI->createVirtualRegister(&SPIRV::IDRegClass);
unsigned Opcode = SPIRV::OpConstantComposite;
auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
.addDef(OneVec)
.addUse(GR.getSPIRVTypeID(ResType));
for (unsigned i = 0; i < NumEles; ++i)
MIB.addUse(OneReg);
constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
return OneVec;
}
return OneReg;
if (ResType->getOpcode() == SPIRV::OpTypeVector)
return GR.getOrCreateConsIntVector(One.getZExtValue(), I, ResType, TII);
return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
}

bool SPIRVInstructionSelector::selectSelect(Register ResVReg,

@@ -959,13 +1014,23 @@ bool SPIRVInstructionSelector::selectConst(Register ResVReg,
const SPIRVType *ResType,
const APInt &Imm,
MachineInstr &I) const {
assert(ResType->getOpcode() != SPIRV::OpTypePointer || Imm.isNullValue());
unsigned TyOpcode = ResType->getOpcode();
assert(TyOpcode != SPIRV::OpTypePointer || Imm.isNullValue());
MachineBasicBlock &BB = *I.getParent();
if (ResType->getOpcode() == SPIRV::OpTypePointer && Imm.isNullValue()) {
if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
Imm.isNullValue())
return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.constrainAllUses(TII, TRI, RBI);
if (TyOpcode == SPIRV::OpTypeInt) {
Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
if (Reg == ResVReg)
return true;
return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
.addDef(ResVReg)
.addUse(Reg)
.constrainAllUses(TII, TRI, RBI);
}
auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
.addDef(ResVReg)

@@ -1006,29 +1071,29 @@ bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
const SPIRVType *ResType,
MachineInstr &I) const {
MachineBasicBlock &BB = *I.getParent();
return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
// object to insert
.addUse(I.getOperand(3).getReg())
// composite to insert into
.addUse(I.getOperand(2).getReg())
// TODO: support arbitrary number of indices
.addImm(foldImm(I.getOperand(4), MRI))
.constrainAllUses(TII, TRI, RBI);
.addUse(I.getOperand(2).getReg());
for (unsigned i = 4; i < I.getNumOperands(); i++)
MIB.addImm(foldImm(I.getOperand(i), MRI));
return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
const SPIRVType *ResType,
MachineInstr &I) const {
MachineBasicBlock &BB = *I.getParent();
return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
.addDef(ResVReg)
.addUse(GR.getSPIRVTypeID(ResType))
.addUse(I.getOperand(2).getReg())
// TODO: support arbitrary number of indices
.addImm(foldImm(I.getOperand(3), MRI))
.constrainAllUses(TII, TRI, RBI);
.addUse(I.getOperand(2).getReg());
for (unsigned i = 3; i < I.getNumOperands(); i++)
MIB.addImm(foldImm(I.getOperand(i), MRI));
return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,

@@ -1154,6 +1219,9 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
}
return MIB.constrainAllUses(TII, TRI, RBI);
} break;
case Intrinsic::spv_cmpxchg:
return selectAtomicCmpXchg(ResVReg, ResType, I);
break;
default:
llvm_unreachable("Intrinsic selection not implemented");
}

@@ -1239,8 +1307,32 @@ bool SPIRVInstructionSelector::selectGlobalValue(
GV->getType(), MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);

std::string GlobalIdent = GV->getGlobalIdentifier();
// TODO: suport @llvm.global.annotations.
// We have functions as operands in tests with blocks of instruction e.g. in
// transcoding/global_block.ll. These operands are not used and should be
// substituted by zero constants. Their type is expected to be always
// OpTypePointer Function %uchar.
if (isa<Function>(GV)) {
const Constant *ConstVal = GV;
MachineBasicBlock &BB = *I.getParent();
Register NewReg = GR.find(ConstVal, GR.CurMF);
if (!NewReg.isValid()) {
SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII);
Register NewReg = ResVReg;
GR.add(ConstVal, GR.CurMF, NewReg);
return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
.addDef(NewReg)
.addUse(GR.getSPIRVTypeID(ResType))
.constrainAllUses(TII, TRI, RBI);
}
assert(NewReg != ResVReg);
return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
.addDef(ResVReg)
.addUse(NewReg)
.constrainAllUses(TII, TRI, RBI);
}
auto GlobalVar = cast<GlobalVariable>(GV);
assert(GlobalVar->getName() != "llvm.global.annotations");

bool HasInit = GlobalVar->hasInitializer() &&
!isa<UndefValue>(GlobalVar->getInitializer());

@@ -45,7 +45,12 @@ void SPIRVMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI,
break;
}
case MachineOperand::MO_Immediate:
if (MI->getOpcode() == SPIRV::OpExtInst && i == 2) {
Register Reg = MAI->getExtInstSetReg(MO.getImm());
MCOp = MCOperand::createReg(Reg);
} else {
MCOp = MCOperand::createImm(MO.getImm());
}
break;
case MachineOperand::MO_FPImmediate:
MCOp = MCOperand::createDFPImm(

@@ -60,62 +60,50 @@ void SPIRVModuleAnalysis::setBaseInfo(const Module &M) {
MAI.InstrsToDelete.clear();
MAI.FuncNameMap.clear();
MAI.GlobalVarList.clear();
MAI.ExtInstSetMap.clear();

// TODO: determine memory model and source language from the configuratoin.
if (auto MemModel = M.getNamedMetadata("spirv.MemoryModel")) {
auto MemMD = MemModel->getOperand(0);
MAI.Addr = static_cast<SPIRV::AddressingModel>(getMetadataUInt(MemMD, 0));
MAI.Mem = static_cast<SPIRV::MemoryModel>(getMetadataUInt(MemMD, 1));
} else {
MAI.Mem = SPIRV::MemoryModel::OpenCL;
MAI.SrcLang = SPIRV::SourceLanguage::OpenCL_C;
unsigned PtrSize = ST->getPointerSize();
MAI.Addr = PtrSize == 32 ? SPIRV::AddressingModel::Physical32
: PtrSize == 64 ? SPIRV::AddressingModel::Physical64
: SPIRV::AddressingModel::Logical;
}
// Get the OpenCL version number from metadata.
// TODO: support other source languages.
MAI.SrcLangVersion = 0;
if (auto VerNode = M.getNamedMetadata("opencl.ocl.version")) {
// Construct version literal according to OpenCL 2.2 environment spec.
MAI.SrcLang = SPIRV::SourceLanguage::OpenCL_C;
// Construct version literal in accordance with SPIRV-LLVM-Translator.
// TODO: support multiple OCL version metadata.
assert(VerNode->getNumOperands() > 0 && "Invalid SPIR");
auto VersionMD = VerNode->getOperand(0);
unsigned MajorNum = getMetadataUInt(VersionMD, 0, 2);
unsigned MinorNum = getMetadataUInt(VersionMD, 1);
unsigned RevNum = getMetadataUInt(VersionMD, 2);
MAI.SrcLangVersion = 0 | (MajorNum << 16) | (MinorNum << 8) | RevNum;
}
}

// True if there is an instruction in the MS list with all the same operands as
// the given instruction has (after the given starting index).
// TODO: maybe it needs to check Opcodes too.
static bool findSameInstrInMS(const MachineInstr &A,
SPIRV::ModuleSectionType MSType,
SPIRV::ModuleAnalysisInfo &MAI,
bool UpdateRegAliases,
unsigned StartOpIndex = 0) {
for (const auto *B : MAI.MS[MSType]) {
const unsigned NumAOps = A.getNumOperands();
if (NumAOps == B->getNumOperands() && A.getNumDefs() == B->getNumDefs()) {
bool AllOpsMatch = true;
for (unsigned i = StartOpIndex; i < NumAOps && AllOpsMatch; ++i) {
if (A.getOperand(i).isReg() && B->getOperand(i).isReg()) {
Register RegA = A.getOperand(i).getReg();
Register RegB = B->getOperand(i).getReg();
AllOpsMatch = MAI.getRegisterAlias(A.getMF(), RegA) ==
MAI.getRegisterAlias(B->getMF(), RegB);
MAI.SrcLangVersion = (MajorNum * 100 + MinorNum) * 1000 + RevNum;
} else {
AllOpsMatch = A.getOperand(i).isIdenticalTo(B->getOperand(i));
MAI.SrcLang = SPIRV::SourceLanguage::Unknown;
MAI.SrcLangVersion = 0;
}

if (auto ExtNode = M.getNamedMetadata("opencl.used.extensions")) {
for (unsigned I = 0, E = ExtNode->getNumOperands(); I != E; ++I) {
MDNode *MD = ExtNode->getOperand(I);
if (!MD || MD->getNumOperands() == 0)
continue;
for (unsigned J = 0, N = MD->getNumOperands(); J != N; ++J)
MAI.SrcExt.insert(cast<MDString>(MD->getOperand(J))->getString());
}
}
if (AllOpsMatch) {
if (UpdateRegAliases) {
assert(A.getOperand(0).isReg() && B->getOperand(0).isReg());
Register LocalReg = A.getOperand(0).getReg();
Register GlobalReg =
MAI.getRegisterAlias(B->getMF(), B->getOperand(0).getReg());
MAI.setRegisterAlias(A.getMF(), LocalReg, GlobalReg);
}
return true;
}
}
}
return false;

// TODO: check if it's required by default.
MAI.ExtInstSetMap[static_cast<unsigned>(SPIRV::InstructionSet::OpenCL_std)] =
Register::index2VirtReg(MAI.getNextID());
}
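
Note: the new source-language version literal computed a few lines above follows the SPIRV-LLVM-Translator convention, (Major * 100 + Minor) * 1000 + Rev, replacing the earlier (Major << 16) | (Minor << 8) | Rev packing. A small worked C++ example, assuming OpenCL C 2.0.0 metadata (values are hypothetical):

// Assuming !opencl.ocl.version holds {i32 2, i32 0, i32 0}:
unsigned MajorNum = 2, MinorNum = 0, RevNum = 0;
unsigned SrcLangVersion = (MajorNum * 100 + MinorNum) * 1000 + RevNum; // 200000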

// Collect MI which defines the register in the given machine function.

@@ -135,7 +123,7 @@ void SPIRVModuleAnalysis::collectGlobalEntities(
const std::vector<SPIRV::DTSortableEntry *> &DepsGraph,
SPIRV::ModuleSectionType MSType,
std::function<bool(const SPIRV::DTSortableEntry *)> Pred,
bool UsePreOrder) {
bool UsePreOrder = false) {
DenseSet<const SPIRV::DTSortableEntry *> Visited;
for (const auto *E : DepsGraph) {
std::function<void(const SPIRV::DTSortableEntry *)> RecHoistUtil;

@@ -188,13 +176,41 @@ void SPIRVModuleAnalysis::processDefInstrs(const Module &M) {

collectGlobalEntities(
DepsGraph, SPIRV::MB_TypeConstVars,
[](const SPIRV::DTSortableEntry *E) { return !E->getIsFunc(); }, false);
[](const SPIRV::DTSortableEntry *E) { return !E->getIsFunc(); });

collectGlobalEntities(
DepsGraph, SPIRV::MB_ExtFuncDecls,
[](const SPIRV::DTSortableEntry *E) { return E->getIsFunc(); }, true);
}

// True if there is an instruction in the MS list with all the same operands as
// the given instruction has (after the given starting index).
// TODO: maybe it needs to check Opcodes too.
static bool findSameInstrInMS(const MachineInstr &A,
SPIRV::ModuleSectionType MSType,
SPIRV::ModuleAnalysisInfo &MAI,
unsigned StartOpIndex = 0) {
for (const auto *B : MAI.MS[MSType]) {
const unsigned NumAOps = A.getNumOperands();
if (NumAOps != B->getNumOperands() || A.getNumDefs() != B->getNumDefs())
continue;
bool AllOpsMatch = true;
for (unsigned i = StartOpIndex; i < NumAOps && AllOpsMatch; ++i) {
if (A.getOperand(i).isReg() && B->getOperand(i).isReg()) {
Register RegA = A.getOperand(i).getReg();
Register RegB = B->getOperand(i).getReg();
AllOpsMatch = MAI.getRegisterAlias(A.getMF(), RegA) ==
MAI.getRegisterAlias(B->getMF(), RegB);
} else {
AllOpsMatch = A.getOperand(i).isIdenticalTo(B->getOperand(i));
}
}
if (AllOpsMatch)
return true;
}
return false;
}

// Look for IDs declared with Import linkage, and map the imported name string
// to the register defining that variable (which will usually be the result of
// an OpFunction). This lets us call externally imported functions using

@@ -228,12 +244,16 @@ void SPIRVModuleAnalysis::collectFuncNames(MachineInstr &MI,
// numbering has already occurred by this point. We can directly compare reg
// arguments when detecting duplicates.
static void collectOtherInstr(MachineInstr &MI, SPIRV::ModuleAnalysisInfo &MAI,
SPIRV::ModuleSectionType MSType) {
SPIRV::ModuleSectionType MSType,
bool Append = true) {
MAI.setSkipEmission(&MI);
if (findSameInstrInMS(MI, MSType, MAI, false))
if (findSameInstrInMS(MI, MSType, MAI))
return; // Found a duplicate, so don't add it.
// No duplicates, so add it.
if (Append)
MAI.MS[MSType].push_back(&MI);
else
MAI.MS[MSType].insert(MAI.MS[MSType].begin(), &MI);
}

// Some global instructions make reference to function-local ID regs, so cannot

@@ -256,15 +276,22 @@ void SPIRVModuleAnalysis::processOtherInstrs(const Module &M) {
} else if (TII->isDecorationInstr(MI)) {
collectOtherInstr(MI, MAI, SPIRV::MB_Annotations);
collectFuncNames(MI, *F);
} else if (TII->isConstantInstr(MI)) {
// Now OpSpecConstant*s are not in DT,
// but they need to be collected anyway.
collectOtherInstr(MI, MAI, SPIRV::MB_TypeConstVars);
} else if (OpCode == SPIRV::OpFunction) {
collectFuncNames(MI, *F);
} else if (OpCode == SPIRV::OpTypeForwardPointer) {
collectOtherInstr(MI, MAI, SPIRV::MB_TypeConstVars, false);
}
}
}
}

// Number registers in all functions globally from 0 onwards and store
// the result in global register alias table.
// the result in global register alias table. Some registers are already
// numbered in collectGlobalEntities.
void SPIRVModuleAnalysis::numberRegistersGlobally(const Module &M) {
for (auto F = M.begin(), E = M.end(); F != E; ++F) {
if ((*F).isDeclaration())

@@ -282,11 +309,50 @@ void SPIRVModuleAnalysis::numberRegistersGlobally(const Module &M) {
Register NewReg = Register::index2VirtReg(MAI.getNextID());
MAI.setRegisterAlias(MF, Reg, NewReg);
}
if (MI.getOpcode() != SPIRV::OpExtInst)
continue;
auto Set = MI.getOperand(2).getImm();
if (MAI.ExtInstSetMap.find(Set) == MAI.ExtInstSetMap.end())
MAI.ExtInstSetMap[Set] = Register::index2VirtReg(MAI.getNextID());
}
}
}
}

// Find OpIEqual and OpBranchConditional instructions originating from
// OpSwitches, mark them skipped for emission. Also mark MBB skipped if it
// contains only these instructions.
static void processSwitches(const Module &M, SPIRV::ModuleAnalysisInfo &MAI,
MachineModuleInfo *MMI) {
DenseSet<Register> SwitchRegs;
for (auto F = M.begin(), E = M.end(); F != E; ++F) {
MachineFunction *MF = MMI->getMachineFunction(*F);
if (!MF)
continue;
for (MachineBasicBlock &MBB : *MF)
for (MachineInstr &MI : MBB) {
if (MAI.getSkipEmission(&MI))
continue;
if (MI.getOpcode() == SPIRV::OpSwitch) {
assert(MI.getOperand(0).isReg());
SwitchRegs.insert(MI.getOperand(0).getReg());
}
if (MI.getOpcode() != SPIRV::OpIEqual || !MI.getOperand(2).isReg() ||
!SwitchRegs.contains(MI.getOperand(2).getReg()))
continue;
Register CmpReg = MI.getOperand(0).getReg();
MachineInstr *CBr = MI.getNextNode();
assert(CBr && CBr->getOpcode() == SPIRV::OpBranchConditional &&
CBr->getOperand(0).isReg() &&
CBr->getOperand(0).getReg() == CmpReg);
MAI.setSkipEmission(&MI);
MAI.setSkipEmission(CBr);
if (&MBB.front() == &MI && &MBB.back() == CBr)
MAI.MBBsToSkip.insert(&MBB);
}
}
}

struct SPIRV::ModuleAnalysisInfo SPIRVModuleAnalysis::MAI;

void SPIRVModuleAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {

@@ -305,7 +371,9 @@ bool SPIRVModuleAnalysis::runOnModule(Module &M) {

setBaseInfo(M);

// TODO: Process type/const/global var/func decl instructions, number their
processSwitches(M, MAI, MMI);

// Process type/const/global var/func decl instructions, number their
// destination registers from 0 to N, collect Extensions and Capabilities.
processDefInstrs(M);

@@ -52,6 +52,9 @@ struct ModuleAnalysisInfo {
SPIRV::AddressingModel Addr;
SPIRV::SourceLanguage SrcLang;
unsigned SrcLangVersion;
StringSet<> SrcExt;
// Maps ExtInstSet to corresponding ID register.
DenseMap<unsigned, Register> ExtInstSetMap;
// Contains the list of all global OpVariables in the module.
SmallVector<MachineInstr *, 4> GlobalVarList;
// Maps function names to coresponding function ID registers.

@@ -59,6 +62,9 @@ struct ModuleAnalysisInfo {
// The set contains machine instructions which are necessary
// for correct MIR but will not be emitted in function bodies.
DenseSet<MachineInstr *> InstrsToDelete;
// The set contains machine basic blocks which are necessary
// for correct MIR but will not be emitted.
DenseSet<MachineBasicBlock *> MBBsToSkip;
// The table contains global aliases of local registers for each machine
// function. The aliases are used to substitute local registers during
// code emission.

@@ -75,6 +81,7 @@ struct ModuleAnalysisInfo {
assert(FuncReg != FuncNameMap.end() && "Cannot find function Id");
return FuncReg->second;
}
Register getExtInstSetReg(unsigned SetNum) { return ExtInstSetMap[SetNum]; }
InstrList &getMSInstrs(unsigned MSType) { return MS[MSType]; }
void setSkipEmission(MachineInstr *MI) { InstrsToDelete.insert(MI); }
bool getSkipEmission(const MachineInstr *MI) {

@@ -123,7 +130,6 @@ public:

private:
void setBaseInfo(const Module &M);
template <typename T> void collectTypesConstsVars();
void collectGlobalEntities(
const std::vector<SPIRV::DTSortableEntry *> &DepsGraph,
SPIRV::ModuleSectionType MSType,

@@ -39,11 +39,58 @@ public:
};
} // namespace

static bool isSpvIntrinsic(MachineInstr &MI, Intrinsic::ID IntrinsicID) {
if (MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS &&
MI.getIntrinsicID() == IntrinsicID)
return true;
return false;
static void addConstantsToTrack(MachineFunction &MF, SPIRVGlobalRegistry *GR) {
MachineRegisterInfo &MRI = MF.getRegInfo();
DenseMap<MachineInstr *, Register> RegsAlreadyAddedToDT;
SmallVector<MachineInstr *, 10> ToErase, ToEraseComposites;
for (MachineBasicBlock &MBB : MF) {
for (MachineInstr &MI : MBB) {
if (!isSpvIntrinsic(MI, Intrinsic::spv_track_constant))
continue;
ToErase.push_back(&MI);
auto *Const =
cast<Constant>(cast<ConstantAsMetadata>(
MI.getOperand(3).getMetadata()->getOperand(0))
->getValue());
if (auto *GV = dyn_cast<GlobalValue>(Const)) {
Register Reg = GR->find(GV, &MF);
if (!Reg.isValid())
GR->add(GV, &MF, MI.getOperand(2).getReg());
else
RegsAlreadyAddedToDT[&MI] = Reg;
} else {
Register Reg = GR->find(Const, &MF);
if (!Reg.isValid()) {
if (auto *ConstVec = dyn_cast<ConstantDataVector>(Const)) {
auto *BuildVec = MRI.getVRegDef(MI.getOperand(2).getReg());
assert(BuildVec &&
BuildVec->getOpcode() == TargetOpcode::G_BUILD_VECTOR);
for (unsigned i = 0; i < ConstVec->getNumElements(); ++i)
GR->add(ConstVec->getElementAsConstant(i), &MF,
BuildVec->getOperand(1 + i).getReg());
}
GR->add(Const, &MF, MI.getOperand(2).getReg());
} else {
RegsAlreadyAddedToDT[&MI] = Reg;
// This MI is unused and will be removed. If the MI uses
// const_composite, it will be unused and should be removed too.
assert(MI.getOperand(2).isReg() && "Reg operand is expected");
MachineInstr *SrcMI = MRI.getVRegDef(MI.getOperand(2).getReg());
if (SrcMI && isSpvIntrinsic(*SrcMI, Intrinsic::spv_const_composite))
ToEraseComposites.push_back(SrcMI);
}
}
}
}
for (MachineInstr *MI : ToErase) {
Register Reg = MI->getOperand(2).getReg();
if (RegsAlreadyAddedToDT.find(MI) != RegsAlreadyAddedToDT.end())
Reg = RegsAlreadyAddedToDT[MI];
MRI.replaceRegWith(MI->getOperand(0).getReg(), Reg);
MI->eraseFromParent();
}
for (MachineInstr *MI : ToEraseComposites)
MI->eraseFromParent();
}
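
Note: the constant being tracked travels on the spv_track_constant intrinsic as metadata wrapping the original IR constant. A hedged sketch of the same unwrapping idiom in isolation (the helper name and the choice of operand index are illustrative, not part of the patch):

// Returns the IR constant wrapped as !{<constant>} on the given metadata
// operand of a G_INTRINSIC_W_SIDE_EFFECTS instruction.
static Constant *getTrackedConstant(const MachineInstr &MI, unsigned OpIdx) {
  const MDNode *MD = MI.getOperand(OpIdx).getMetadata();
  return cast<ConstantAsMetadata>(MD->getOperand(0))->getValue();
}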

static void foldConstantsIntoIntrinsics(MachineFunction &MF) {

@@ -120,6 +167,7 @@ static SPIRVType *propagateSPIRVType(MachineInstr *MI, SPIRVGlobalRegistry *GR,
}
case TargetOpcode::G_TRUNC:
case TargetOpcode::G_ADDRSPACE_CAST:
case TargetOpcode::G_PTR_ADD:
case TargetOpcode::COPY: {
MachineOperand &Op = MI->getOperand(1);
MachineInstr *Def = Op.isReg() ? MRI.getVRegDef(Op.getReg()) : nullptr;

@@ -308,6 +356,22 @@ static void processInstrsWithTypeFolding(MachineFunction &MF,
processInstr(MI, MIB, MRI, GR);
}
}
for (MachineBasicBlock &MBB : MF) {
for (MachineInstr &MI : MBB) {
// We need to rewrite dst types for ASSIGN_TYPE instrs to be able
// to perform tblgen'erated selection and we can't do that on Legalizer
// as it operates on gMIR only.
if (MI.getOpcode() != SPIRV::ASSIGN_TYPE)
continue;
Register SrcReg = MI.getOperand(1).getReg();
if (!isTypeFoldingSupported(MRI.getVRegDef(SrcReg)->getOpcode()))
continue;
Register DstReg = MI.getOperand(0).getReg();
if (MRI.getType(DstReg).isVector())
MRI.setRegClass(DstReg, &SPIRV::IDRegClass);
MRI.setType(DstReg, LLT::scalar(32));
}
}
}

static void processSwitches(MachineFunction &MF, SPIRVGlobalRegistry *GR,

@@ -421,6 +485,7 @@ bool SPIRVPreLegalizer::runOnMachineFunction(MachineFunction &MF) {
SPIRVGlobalRegistry *GR = ST.getSPIRVGlobalRegistry();
GR->setCurrentFunc(MF);
MachineIRBuilder MIB(MF);
addConstantsToTrack(MF, GR);
foldConstantsIntoIntrinsics(MF);
insertBitcasts(MF, GR, MIB);
generateAssignInstrs(MF, GR, MIB);

@@ -0,0 +1,288 @@
//===-- SPIRVPrepareFunctions.cpp - modify function signatures --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass modifies function signatures containing aggregate arguments
// and/or return value. Also it substitutes some llvm intrinsic calls by
// function calls, generating these functions as the translator does.
//
// NOTE: this pass is a module-level one due to the necessity to modify
// GVs/functions.
//
//===----------------------------------------------------------------------===//

#include "SPIRV.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/LowerMemIntrinsics.h"

using namespace llvm;

namespace llvm {
void initializeSPIRVPrepareFunctionsPass(PassRegistry &);
}

namespace {

class SPIRVPrepareFunctions : public ModulePass {
Function *processFunctionSignature(Function *F);

public:
static char ID;
SPIRVPrepareFunctions() : ModulePass(ID) {
initializeSPIRVPrepareFunctionsPass(*PassRegistry::getPassRegistry());
}

bool runOnModule(Module &M) override;

StringRef getPassName() const override { return "SPIRV prepare functions"; }

void getAnalysisUsage(AnalysisUsage &AU) const override {
ModulePass::getAnalysisUsage(AU);
}
};

} // namespace

char SPIRVPrepareFunctions::ID = 0;

INITIALIZE_PASS(SPIRVPrepareFunctions, "prepare-functions",
"SPIRV prepare functions", false, false)

Function *SPIRVPrepareFunctions::processFunctionSignature(Function *F) {
IRBuilder<> B(F->getContext());

bool IsRetAggr = F->getReturnType()->isAggregateType();
bool HasAggrArg =
std::any_of(F->arg_begin(), F->arg_end(), [](Argument &Arg) {
return Arg.getType()->isAggregateType();
});
bool DoClone = IsRetAggr || HasAggrArg;
if (!DoClone)
return F;
SmallVector<std::pair<int, Type *>, 4> ChangedTypes;
Type *RetType = IsRetAggr ? B.getInt32Ty() : F->getReturnType();
if (IsRetAggr)
ChangedTypes.push_back(std::pair<int, Type *>(-1, F->getReturnType()));
SmallVector<Type *, 4> ArgTypes;
for (const auto &Arg : F->args()) {
if (Arg.getType()->isAggregateType()) {
ArgTypes.push_back(B.getInt32Ty());
ChangedTypes.push_back(
std::pair<int, Type *>(Arg.getArgNo(), Arg.getType()));
} else
ArgTypes.push_back(Arg.getType());
}
FunctionType *NewFTy =
FunctionType::get(RetType, ArgTypes, F->getFunctionType()->isVarArg());
Function *NewF =
Function::Create(NewFTy, F->getLinkage(), F->getName(), *F->getParent());

ValueToValueMapTy VMap;
auto NewFArgIt = NewF->arg_begin();
for (auto &Arg : F->args()) {
StringRef ArgName = Arg.getName();
NewFArgIt->setName(ArgName);
VMap[&Arg] = &(*NewFArgIt++);
}
SmallVector<ReturnInst *, 8> Returns;

CloneFunctionInto(NewF, F, VMap, CloneFunctionChangeType::LocalChangesOnly,
Returns);
NewF->takeName(F);

NamedMDNode *FuncMD =
F->getParent()->getOrInsertNamedMetadata("spv.cloned_funcs");
SmallVector<Metadata *, 2> MDArgs;
MDArgs.push_back(MDString::get(B.getContext(), NewF->getName()));
for (auto &ChangedTyP : ChangedTypes)
MDArgs.push_back(MDNode::get(
B.getContext(),
{ConstantAsMetadata::get(B.getInt32(ChangedTyP.first)),
ValueAsMetadata::get(Constant::getNullValue(ChangedTyP.second))}));
MDNode *ThisFuncMD = MDNode::get(B.getContext(), MDArgs);
FuncMD->addOperand(ThisFuncMD);

for (auto *U : make_early_inc_range(F->users())) {
if (auto *CI = dyn_cast<CallInst>(U))
CI->mutateFunctionType(NewF->getFunctionType());
U->replaceUsesOfWith(F, NewF);
}
return NewF;
}
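
Note: processFunctionSignature records, per cloned function, the original aggregate types keyed by argument index (with -1 standing for the return type) in the module-level "spv.cloned_funcs" metadata, which call lowering later consults to restore the SPIR-V types. A hedged C++ sketch of how that metadata can be walked back (the function and variable names here are illustrative, not part of the patch):

// Iterate the "spv.cloned_funcs" records emitted above.
static void forEachClonedFunc(const Module &M) {
  const NamedMDNode *FuncMD = M.getNamedMetadata("spv.cloned_funcs");
  if (!FuncMD)
    return;
  for (const MDNode *ThisFuncMD : FuncMD->operands()) {
    StringRef FuncName = cast<MDString>(ThisFuncMD->getOperand(0))->getString();
    for (unsigned I = 1, E = ThisFuncMD->getNumOperands(); I != E; ++I) {
      const MDNode *Pair = cast<MDNode>(ThisFuncMD->getOperand(I));
      // Operand 0: the argument index as i32 (-1 encodes the return type).
      int64_t ArgIdx =
          mdconst::extract<ConstantInt>(Pair->getOperand(0))->getSExtValue();
      // Operand 1: a null value whose type is the original aggregate type.
      Type *OrigTy = cast<ValueAsMetadata>(Pair->getOperand(1))->getType();
      (void)FuncName; (void)ArgIdx; (void)OrigTy; // consume as needed
    }
  }
}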

std::string lowerLLVMIntrinsicName(IntrinsicInst *II) {
Function *IntrinsicFunc = II->getCalledFunction();
assert(IntrinsicFunc && "Missing function");
std::string FuncName = IntrinsicFunc->getName().str();
std::replace(FuncName.begin(), FuncName.end(), '.', '_');
FuncName = "spirv." + FuncName;
return FuncName;
}

static Function *getOrCreateFunction(Module *M, Type *RetTy,
ArrayRef<Type *> ArgTypes,
StringRef Name) {
FunctionType *FT = FunctionType::get(RetTy, ArgTypes, false);
Function *F = M->getFunction(Name);
if (F && F->getFunctionType() == FT)
return F;
Function *NewF = Function::Create(FT, GlobalValue::ExternalLinkage, Name, M);
if (F)
NewF->setDSOLocal(F->isDSOLocal());
NewF->setCallingConv(CallingConv::SPIR_FUNC);
return NewF;
}

static void lowerFunnelShifts(Module *M, IntrinsicInst *FSHIntrinsic) {
// Get a separate function - otherwise, we'd have to rework the CFG of the
// current one. Then simply replace the intrinsic uses with a call to the new
// function.
// Generate LLVM IR for i* @spirv.llvm_fsh?_i* (i* %a, i* %b, i* %c)
FunctionType *FSHFuncTy = FSHIntrinsic->getFunctionType();
Type *FSHRetTy = FSHFuncTy->getReturnType();
const std::string FuncName = lowerLLVMIntrinsicName(FSHIntrinsic);
Function *FSHFunc =
getOrCreateFunction(M, FSHRetTy, FSHFuncTy->params(), FuncName);

if (!FSHFunc->empty()) {
FSHIntrinsic->setCalledFunction(FSHFunc);
return;
}
BasicBlock *RotateBB = BasicBlock::Create(M->getContext(), "rotate", FSHFunc);
IRBuilder<> IRB(RotateBB);
Type *Ty = FSHFunc->getReturnType();
// Build the actual funnel shift rotate logic.
// In the comments, "int" is used interchangeably with "vector of int
// elements".
FixedVectorType *VectorTy = dyn_cast<FixedVectorType>(Ty);
Type *IntTy = VectorTy ? VectorTy->getElementType() : Ty;
unsigned BitWidth = IntTy->getIntegerBitWidth();
ConstantInt *BitWidthConstant = IRB.getInt({BitWidth, BitWidth});
Value *BitWidthForInsts =
VectorTy
? IRB.CreateVectorSplat(VectorTy->getNumElements(), BitWidthConstant)
: BitWidthConstant;
Value *RotateModVal =
IRB.CreateURem(/*Rotate*/ FSHFunc->getArg(2), BitWidthForInsts);
Value *FirstShift = nullptr, *SecShift = nullptr;
if (FSHIntrinsic->getIntrinsicID() == Intrinsic::fshr) {
// Shift the less significant number right, the "rotate" number of bits
// will be 0-filled on the left as a result of this regular shift.
FirstShift = IRB.CreateLShr(FSHFunc->getArg(1), RotateModVal);
} else {
// Shift the more significant number left, the "rotate" number of bits
// will be 0-filled on the right as a result of this regular shift.
FirstShift = IRB.CreateShl(FSHFunc->getArg(0), RotateModVal);
}
// We want the "rotate" number of the more significant int's LSBs (MSBs) to
// occupy the leftmost (rightmost) "0 space" left by the previous operation.
// Therefore, subtract the "rotate" number from the integer bitsize...
Value *SubRotateVal = IRB.CreateSub(BitWidthForInsts, RotateModVal);
if (FSHIntrinsic->getIntrinsicID() == Intrinsic::fshr) {
// ...and left-shift the more significant int by this number, zero-filling
// the LSBs.
SecShift = IRB.CreateShl(FSHFunc->getArg(0), SubRotateVal);
} else {
// ...and right-shift the less significant int by this number, zero-filling
// the MSBs.
SecShift = IRB.CreateLShr(FSHFunc->getArg(1), SubRotateVal);
}
// A simple binary addition of the shifted ints yields the final result.
IRB.CreateRet(IRB.CreateOr(FirstShift, SecShift));

FSHIntrinsic->setCalledFunction(FSHFunc);
}
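
Note: the generated body implements the usual funnel-shift semantics. A scalar C++ illustration for i32 operands, assuming the shift amount is reduced modulo the bit width as the generated IR does (this models the semantics, not the exact instruction sequence, and the function name is illustrative):

#include <cstdint>

// fshl: concatenate A:B with A in the high half, shift left by C % 32, and
// keep the high 32 bits; fshr keeps the low 32 bits after a right shift.
static uint32_t modelFshl32(uint32_t A, uint32_t B, uint32_t C) {
  unsigned Rot = C % 32;
  return Rot == 0 ? A : (A << Rot) | (B >> (32 - Rot));
}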
|
||||
|
||||
static void buildUMulWithOverflowFunc(Module *M, Function *UMulFunc) {
|
||||
// The function body is already created.
|
||||
if (!UMulFunc->empty())
|
||||
return;
|
||||
|
||||
BasicBlock *EntryBB = BasicBlock::Create(M->getContext(), "entry", UMulFunc);
|
||||
IRBuilder<> IRB(EntryBB);
|
||||
// Build the actual unsigned multiplication logic with the overflow
|
||||
// indication. Do unsigned multiplication Mul = A * B. Then check
|
||||
// if unsigned division Div = Mul / A is not equal to B. If so,
|
||||
// then overflow has happened.
|
||||
Value *Mul = IRB.CreateNUWMul(UMulFunc->getArg(0), UMulFunc->getArg(1));
|
||||
Value *Div = IRB.CreateUDiv(Mul, UMulFunc->getArg(0));
|
||||
Value *Overflow = IRB.CreateICmpNE(UMulFunc->getArg(0), Div);
|
||||
|
||||
// umul.with.overflow intrinsic return a structure, where the first element
|
||||
// is the multiplication result, and the second is an overflow bit.
|
||||
Type *StructTy = UMulFunc->getReturnType();
|
||||
Value *Agg = IRB.CreateInsertValue(UndefValue::get(StructTy), Mul, {0});
|
||||
Value *Res = IRB.CreateInsertValue(Agg, Overflow, {1});
|
||||
IRB.CreateRet(Res);
|
||||
}
|
||||
|
||||
static void lowerUMulWithOverflow(Module *M, IntrinsicInst *UMulIntrinsic) {
|
||||
// Get a separate function - otherwise, we'd have to rework the CFG of the
|
||||
// current one. Then simply replace the intrinsic uses with a call to the new
|
||||
// function.
|
||||
FunctionType *UMulFuncTy = UMulIntrinsic->getFunctionType();
|
||||
Type *FSHLRetTy = UMulFuncTy->getReturnType();
|
||||
const std::string FuncName = lowerLLVMIntrinsicName(UMulIntrinsic);
|
||||
Function *UMulFunc =
|
||||
getOrCreateFunction(M, FSHLRetTy, UMulFuncTy->params(), FuncName);
|
||||
buildUMulWithOverflowFunc(M, UMulFunc);
|
||||
UMulIntrinsic->setCalledFunction(UMulFunc);
|
||||
}
|
||||
|
||||
static void substituteIntrinsicCalls(Module *M, Function *F) {
|
||||
for (BasicBlock &BB : *F) {
|
||||
for (Instruction &I : BB) {
|
||||
auto Call = dyn_cast<CallInst>(&I);
|
||||
if (!Call)
|
||||
continue;
|
||||
Call->setTailCall(false);
|
||||
Function *CF = Call->getCalledFunction();
|
||||
if (!CF || !CF->isIntrinsic())
|
||||
continue;
|
||||
auto *II = cast<IntrinsicInst>(Call);
|
||||
if (II->getIntrinsicID() == Intrinsic::fshl ||
|
||||
II->getIntrinsicID() == Intrinsic::fshr)
|
||||
lowerFunnelShifts(M, II);
|
||||
else if (II->getIntrinsicID() == Intrinsic::umul_with_overflow)
|
||||
lowerUMulWithOverflow(M, II);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool SPIRVPrepareFunctions::runOnModule(Module &M) {
|
||||
for (Function &F : M)
|
||||
substituteIntrinsicCalls(&M, &F);
|
||||
|
||||
std::vector<Function *> FuncsWorklist;
|
||||
bool Changed = false;
|
||||
for (auto &F : M)
|
||||
FuncsWorklist.push_back(&F);
|
||||
|
||||
for (auto *Func : FuncsWorklist) {
|
||||
Function *F = processFunctionSignature(Func);
|
||||
|
||||
bool CreatedNewF = F != Func;
|
||||
|
||||
if (Func->isDeclaration()) {
|
||||
Changed |= CreatedNewF;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (CreatedNewF)
|
||||
Func->eraseFromParent();
|
||||
}
|
||||
|
||||
return Changed;
|
||||
}
|
||||
|
||||
ModulePass *llvm::createSPIRVPrepareFunctionsPass() {
|
||||
return new SPIRVPrepareFunctions();
|
||||
}
|
|
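For orientation, a minimal before/after sketch of what substituteIntrinsicCalls does at a call site. The wrapper name spirv.llvm_fshl_i32 is taken from the fshl LIT test further down; the exact IR shape here is illustrative rather than the literal pass output.

; before SPIRVPrepareFunctions
%r = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 8)

; after SPIRVPrepareFunctions: the intrinsic call is redirected to a generated
; module-local helper whose body is built by lowerFunnelShifts above
%r = call i32 @spirv.llvm_fshl_i32(i32 %x, i32 %y, i32 8)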
@ -46,8 +46,7 @@ SPIRVSubtarget::SPIRVSubtarget(const Triple &TT, const std::string &CPU,
      PointerSize(computePointerSize(TT)), SPIRVVersion(0), InstrInfo(),
      FrameLowering(initSubtargetDependencies(CPU, FS)), TLInfo(TM, *this) {
  GR = std::make_unique<SPIRVGlobalRegistry>(PointerSize);
  CallLoweringInfo =
      std::make_unique<SPIRVCallLowering>(TLInfo, *this, GR.get());
  CallLoweringInfo = std::make_unique<SPIRVCallLowering>(TLInfo, GR.get());
  Legalizer = std::make_unique<SPIRVLegalizerInfo>(*this);
  RegBankInfo = std::make_unique<SPIRVRegisterBankInfo>();
  InstSelector.reset(
@ -140,7 +140,10 @@ TargetPassConfig *SPIRVTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new SPIRVPassConfig(*this, PM);
}

void SPIRVPassConfig::addIRPasses() { TargetPassConfig::addIRPasses(); }
void SPIRVPassConfig::addIRPasses() {
  TargetPassConfig::addIRPasses();
  addPass(createSPIRVPrepareFunctionsPass());
}

void SPIRVPassConfig::addISelPrepare() {
  addPass(createSPIRVEmitIntrinsicsPass(&getTM<SPIRVTargetMachine>()));
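With addIRPasses now registering it, SPIRVPrepareFunctions runs on every SPIR-V codegen invocation; the new LIT tests below exercise it through the plain backend driver, e.g. (input.ll is a placeholder name):

llc -O0 -mtriple=spirv32-unknown-unknown input.ll -o -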
@ -45,6 +45,14 @@ static size_t getPaddedLen(const StringRef &Str) {
  return (Len % 4 == 0) ? Len : Len + (4 - (Len % 4));
}

void addStringImm(const StringRef &Str, MCInst &Inst) {
  const size_t PaddedLen = getPaddedLen(Str);
  for (unsigned i = 0; i < PaddedLen; i += 4) {
    // Add an operand for the 32-bits of chars or padding.
    Inst.addOperand(MCOperand::createImm(convertCharsToWord(Str, i)));
  }
}

void addStringImm(const StringRef &Str, MachineInstrBuilder &MIB) {
  const size_t PaddedLen = getPaddedLen(Str);
  for (unsigned i = 0; i < PaddedLen; i += 4) {
@ -182,6 +190,24 @@ SPIRV::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass SC) {
  }
}

SPIRV::MemorySemantics getMemSemantics(AtomicOrdering Ord) {
  switch (Ord) {
  case AtomicOrdering::Acquire:
    return SPIRV::MemorySemantics::Acquire;
  case AtomicOrdering::Release:
    return SPIRV::MemorySemantics::Release;
  case AtomicOrdering::AcquireRelease:
    return SPIRV::MemorySemantics::AcquireRelease;
  case AtomicOrdering::SequentiallyConsistent:
    return SPIRV::MemorySemantics::SequentiallyConsistent;
  case AtomicOrdering::Unordered:
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::NotAtomic:
  default:
    return SPIRV::MemorySemantics::None;
  }
}

MachineInstr *getDefInstrMaybeConstant(Register &ConstReg,
                                       const MachineRegisterInfo *MRI) {
  MachineInstr *ConstInstr = MRI->getVRegDef(ConstReg);
@ -202,6 +228,11 @@ uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI) {
  return MI->getOperand(1).getCImm()->getValue().getZExtValue();
}

bool isSpvIntrinsic(MachineInstr &MI, Intrinsic::ID IntrinsicID) {
  return MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS &&
         MI.getIntrinsicID() == IntrinsicID;
}

Type *getMDOperandAsType(const MDNode *N, unsigned I) {
  return cast<ValueAsMetadata>(N->getOperand(I))->getType();
}
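For reference, a short sketch of the orderings getMemSemantics handles and the SPIR-V MemorySemantics masks it returns (mask values per the SPIR-V specification; the IR lines are illustrative):

  %old = atomicrmw add i32* %p, i32 1 acq_rel                   ; AcquireRelease (0x8)
  %pair = cmpxchg i32* %p, i32 %cmp, i32 %new seq_cst acquire   ; SequentiallyConsistent (0x10) / Acquire (0x2)
  %v = load atomic i32, i32* %p monotonic, align 4              ; None (0x0)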
@ -32,6 +32,7 @@ class SPIRVInstrInfo;
// Add the given string as a series of integer operands, inserting null
// terminators and padding to make sure the operands all have 32-bit
// little-endian words.
void addStringImm(const llvm::StringRef &Str, llvm::MCInst &Inst);
void addStringImm(const llvm::StringRef &Str, llvm::MachineInstrBuilder &MIB);
void addStringImm(const llvm::StringRef &Str, llvm::IRBuilder<> &B,
                  std::vector<llvm::Value *> &Args);
@ -67,6 +68,8 @@ llvm::SPIRV::StorageClass addressSpaceToStorageClass(unsigned AddrSpace);
llvm::SPIRV::MemorySemantics
getMemSemanticsForStorageClass(llvm::SPIRV::StorageClass SC);

llvm::SPIRV::MemorySemantics getMemSemantics(llvm::AtomicOrdering Ord);

// Find def instruction for the given ConstReg, walking through
// spv_track_constant and ASSIGN_TYPE instructions. Updates ConstReg to the
// def of the OpConstant instruction.
@ -78,6 +81,9 @@ getDefInstrMaybeConstant(llvm::Register &ConstReg,
uint64_t getIConstVal(llvm::Register ConstReg,
                      const llvm::MachineRegisterInfo *MRI);

// Check if MI is a SPIR-V specific intrinsic call.
bool isSpvIntrinsic(llvm::MachineInstr &MI, llvm::Intrinsic::ID IntrinsicID);

// Get type of i-th operand of the metadata node.
llvm::Type *getMDOperandAsType(const llvm::MDNode *N, unsigned I);
#endif // LLVM_LIB_TARGET_SPIRV_SPIRVUTILS_H
@ -0,0 +1,60 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV

; CHECK-SPIRV: %[[#Int:]] = OpTypeInt 32 0
; CHECK-SPIRV-DAG: %[[#MemScope_Device:]] = OpConstant %[[#Int]] 1
; CHECK-SPIRV-DAG: %[[#MemSemEqual_SeqCst:]] = OpConstant %[[#Int]] 16
; CHECK-SPIRV-DAG: %[[#MemSemUnequal_Acquire:]] = OpConstant %[[#Int]] 2
; CHECK-SPIRV-DAG: %[[#Constant_456:]] = OpConstant %[[#Int]] 456
; CHECK-SPIRV-DAG: %[[#Constant_128:]] = OpConstant %[[#Int]] 128
; CHECK-SPIRV-DAG: %[[#Bool:]] = OpTypeBool
; CHECK-SPIRV-DAG: %[[#Struct:]] = OpTypeStruct %[[#Int]] %[[#Bool]]
; CHECK-SPIRV-DAG: %[[#UndefStruct:]] = OpUndef %[[#Struct]]

; CHECK-SPIRV: %[[#Pointer:]] = OpFunctionParameter %[[#]]
; CHECK-SPIRV: %[[#Value_ptr:]] = OpFunctionParameter %[[#]]
; CHECK-SPIRV: %[[#Comparator:]] = OpFunctionParameter %[[#]]

; CHECK-SPIRV: %[[#Value:]] = OpLoad %[[#Int]] %[[#Value_ptr]]
; CHECK-SPIRV: %[[#Res:]] = OpAtomicCompareExchange %[[#Int]] %[[#Pointer]] %[[#MemScope_Device]]
; CHECK-SPIRV-SAME: %[[#MemSemEqual_SeqCst]] %[[#MemSemUnequal_Acquire]] %[[#Value]] %[[#Comparator]]
; CHECK-SPIRV: %[[#Success:]] = OpIEqual %[[#]] %[[#Res]] %[[#Comparator]]
; CHECK-SPIRV: %[[#Composite_0:]] = OpCompositeInsert %[[#Struct]] %[[#Res]] %[[#UndefStruct]] 0
; CHECK-SPIRV: %[[#Composite_1:]] = OpCompositeInsert %[[#Struct]] %[[#Success]] %[[#Composite_0]] 1
; CHECK-SPIRV: %[[#]] = OpCompositeExtract %[[#Bool]] %[[#Composite_1]] 1

target datalayout = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024"

; Function Attrs: nounwind
define dso_local spir_func void @test(i32* %ptr, i32* %value_ptr, i32 %comparator) local_unnamed_addr {
entry:
  %0 = load i32, i32* %value_ptr, align 4
  %1 = cmpxchg i32* %ptr, i32 %comparator, i32 %0 seq_cst acquire
  %2 = extractvalue { i32, i1 } %1, 1
  br i1 %2, label %cmpxchg.continue, label %cmpxchg.store_expected

cmpxchg.store_expected:                           ; preds = %entry
  %3 = extractvalue { i32, i1 } %1, 0
  store i32 %3, i32* %value_ptr, align 4
  br label %cmpxchg.continue

cmpxchg.continue:                                 ; preds = %cmpxchg.store_expected, %entry
  ret void
}

; CHECK-SPIRV: %[[#Ptr:]] = OpFunctionParameter %[[#]]
; CHECK-SPIRV: %[[#Store_ptr:]] = OpFunctionParameter %[[#]]

; CHECK-SPIRV: %[[#Res_1:]] = OpAtomicCompareExchange %[[#Int]] %[[#Ptr]] %[[#MemScope_Device]]
; CHECK-SPIRV-SAME: %[[#MemSemEqual_SeqCst]] %[[#MemSemUnequal_Acquire]] %[[#Constant_456]] %[[#Constant_128]]
; CHECK-SPIRV: %[[#Success_1:]] = OpIEqual %[[#]] %[[#Res_1]] %[[#Constant_128]]
; CHECK-SPIRV: %[[#Composite:]] = OpCompositeInsert %[[#Struct]] %[[#Res_1]] %[[#UndefStruct]] 0
; CHECK-SPIRV: %[[#Composite_1:]] = OpCompositeInsert %[[#Struct]] %[[#Success_1]] %[[#Composite]] 1
; CHECK-SPIRV: OpStore %[[#Store_ptr]] %[[#Composite_1]]

; Function Attrs: nounwind
define dso_local spir_func void @test2(i32* %ptr, {i32, i1}* %store_ptr) local_unnamed_addr {
entry:
  %0 = cmpxchg i32* %ptr, i32 128, i32 456 seq_cst acquire
  store { i32, i1 } %0, { i32, i1 }* %store_ptr, align 4
  ret void
}
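A reading aid for the constants matched above: the scope operand 1 is the SPIR-V Device scope, and the memory-semantics operands 16 and 2 are the SequentiallyConsistent (0x10) and Acquire (0x2) masks, i.e. exactly what getMemSemantics returns for the seq_cst and acquire orderings on the cmpxchg. Since OpAtomicCompareExchange returns only the integer value, the { i32, i1 } result of the IR cmpxchg is rebuilt with OpIEqual plus two OpCompositeInsert instructions, which is what the CHECK lines verify.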
@ -0,0 +1,27 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s

; CHECK-DAG: [[I32:%.+]] = OpTypeInt 32
; CHECK-DAG: [[BOOL:%.+]] = OpTypeBool
; CHECK-DAG: [[TRUE:%.+]] = OpConstantTrue
; CHECK-DAG: [[FALSE:%.+]] = OpConstantFalse

define i1 @test_if(i32 %a, i32 %b) {
entry:
  %cond = icmp eq i32 %a, %b
  br i1 %cond, label %true_label, label %false_label
true_label:
  ret i1 true
false_label:
  ret i1 false
}

; CHECK: OpFunction
; CHECK: [[A:%.+]] = OpFunctionParameter [[I32]]
; CHECK: [[B:%.+]] = OpFunctionParameter [[I32]]
; CHECK: [[ENTRY:%.+]] = OpLabel
; CHECK: [[COND:%.+]] = OpIEqual [[BOOL]] [[A]] [[B]]
; CHECK: OpBranchConditional [[COND]] [[TRUE_LABEL:%.+]] [[FALSE_LABEL:%.+]]
; CHECK: [[TRUE_LABEL]] = OpLabel
; CHECK: OpReturnValue [[TRUE]]
; CHECK: [[FALSE_LABEL]] = OpLabel
; CHECK: OpReturnValue [[FALSE]]
@ -0,0 +1,38 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s

@global = addrspace(1) constant i32 1 ; OpenCL global memory
@constant = addrspace(2) constant i32 2 ; OpenCL constant memory
@local = addrspace(3) constant i32 3 ; OpenCL local memory

define i32 @getGlobal1() {
  %g = load i32, i32 addrspace(1)* @global
  ret i32 %g
}

define i32 @getGlobal2() {
  %g = load i32, i32 addrspace(2)* @constant
  ret i32 %g
}

define i32 @getGlobal3() {
  %g = load i32, i32 addrspace(3)* @local
  ret i32 %g
}

; CHECK: [[INT:%.+]] = OpTypeInt 32

; CHECK-DAG: [[PTR_TO_INT_AS1:%.+]] = OpTypePointer CrossWorkgroup [[INT]]
; CHECK-DAG: [[PTR_TO_INT_AS2:%.+]] = OpTypePointer UniformConstant [[INT]]
; CHECK-DAG: [[PTR_TO_INT_AS3:%.+]] = OpTypePointer Workgroup [[INT]]

; CHECK-DAG: [[CST_AS1:%.+]] = OpConstant [[INT]] 1
; CHECK-DAG: [[CST_AS2:%.+]] = OpConstant [[INT]] 2
; CHECK-DAG: [[CST_AS3:%.+]] = OpConstant [[INT]] 3

; CHECK-DAG: [[GV1:%.+]] = OpVariable [[PTR_TO_INT_AS1]] CrossWorkgroup [[CST_AS1]]
; CHECK-DAG: [[GV2:%.+]] = OpVariable [[PTR_TO_INT_AS2]] UniformConstant [[CST_AS2]]
; CHECK-DAG: [[GV3:%.+]] = OpVariable [[PTR_TO_INT_AS3]] Workgroup [[CST_AS3]]

; CHECK: OpLoad [[INT]] [[GV1]]
; CHECK: OpLoad [[INT]] [[GV2]]
; CHECK: OpLoad [[INT]] [[GV3]]
@ -0,0 +1,20 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s

%aggregate = type { i8, i32 }

define %aggregate @getConstant() {
  ret %aggregate { i8 1, i32 2 }
}

; CHECK: OpName [[GET:%.+]] "getConstant"

; CHECK-DAG: [[I8:%.+]] = OpTypeInt 8
; CHECK-DAG: [[I32:%.+]] = OpTypeInt 32
; CHECK-DAG: [[AGGREGATE:%.+]] = OpTypeStruct [[I8]] [[I32]]
; CHECK-DAG: [[CST_I8:%.+]] = OpConstant [[I8]] 1
; CHECK-DAG: [[CST_I32:%.+]] = OpConstant [[I32]] 2
; CHECK-DAG: [[CST_AGGREGATE:%.+]] = OpConstantComposite [[AGGREGATE]] [[CST_I8]] [[CST_I32]]

; CHECK: [[GET]] = OpFunction [[AGGREGATE]]
; CHECK: OpReturnValue [[CST_AGGREGATE]]
; CHECK: OpFunctionEnd
@ -0,0 +1,17 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s

define i1 @getConstantTrue() {
  ret i1 true
}

define i1 @getConstantFalse() {
  ret i1 false
}

; CHECK: [[BOOL:%.+]] = OpTypeBool
; CHECK-DAG: [[FN:%.+]] = OpTypeFunction [[BOOL]]
; CHECK-DAG: [[TRUE:%.+]] = OpConstantTrue
; CHECK-DAG: [[FALSE:%.+]] = OpConstantFalse

; CHECK: OpFunction [[BOOL]] None [[FN]]
; CHECK: OpFunction [[BOOL]] None [[FN]]
@ -0,0 +1,27 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s

; OpenCL global memory
define i32 addrspace(1)* @getConstant1() {
  ret i32 addrspace(1)* null
}

; OpenCL constant memory
define i32 addrspace(2)* @getConstant2() {
  ret i32 addrspace(2)* null
}

; OpenCL local memory
define i32 addrspace(3)* @getConstant3() {
  ret i32 addrspace(3)* null
}

; CHECK: [[INT:%.+]] = OpTypeInt 32

; CHECK-DAG: [[PTR_AS1:%.+]] = OpTypePointer CrossWorkgroup [[INT]]
; CHECK-DAG: OpConstantNull [[PTR_AS1]]

; CHECK-DAG: [[PTR_AS2:%.+]] = OpTypePointer UniformConstant [[INT]]
; CHECK-DAG: OpConstantNull [[PTR_AS2]]

; CHECK-DAG: [[PTR_AS3:%.+]] = OpTypePointer Workgroup [[INT]]
; CHECK-DAG: OpConstantNull [[PTR_AS3]]
@ -0,0 +1,12 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s

; TODO: Add test for matrix. But how are they represented in LLVM IR?

define <4 x i8> @getVectorConstant() {
  ret <4 x i8> <i8 1, i8 1, i8 1, i8 1>
}

; CHECK-DAG: [[I8:%.+]] = OpTypeInt 8
; CHECK-DAG: [[VECTOR:%.+]] = OpTypeVector [[I8]]
; CHECK-DAG: [[CST_I8:%.+]] = OpConstant [[I8]] 1
; CHECK-DAG: [[CST_VECTOR:%.+]] = OpConstantComposite [[VECTOR]] [[CST_I8]] [[CST_I8]] [[CST_I8]] [[CST_I8]]
@ -0,0 +1,60 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s

; CHECK-DAG: OpName [[FUN:%.+]] "fun"
; CHECK-DAG: OpName [[FOO:%.+]] "foo"
; CHECK-DAG: OpName [[GOO:%.+]] "goo"

; CHECK-NOT: DAG-FENCE

; CHECK-DAG: [[I16:%.+]] = OpTypeInt 16
; CHECK-DAG: [[I32:%.+]] = OpTypeInt 32
; CHECK-DAG: [[I64:%.+]] = OpTypeInt 64
; CHECK-DAG: [[FN3:%.+]] = OpTypeFunction [[I32]] [[I32]] [[I16]] [[I64]]
; CHECK-DAG: [[PAIR:%.+]] = OpTypeStruct [[I32]] [[I16]]
; CHECK-DAG: [[FN1:%.+]] = OpTypeFunction [[I32]] [[I32]]
; CHECK-DAG: [[FN2:%.+]] = OpTypeFunction [[I32]] [[PAIR]] [[I64]]
; According to the Specification, the OpUndef can be defined in Function.
; But the Specification also recommends defining it here. So we enforce that.
; CHECK-DAG: [[UNDEF:%.+]] = OpUndef [[PAIR]]


declare i32 @fun(i32 %value)

; Check for @fun declaration
; CHECK: [[FUN]] = OpFunction [[I32]] None [[FN1]]
; CHECK-NEXT: OpFunctionParameter [[I32]]
; CHECK-NEXT: OpFunctionEnd


define i32 @foo({i32, i16} %in, i64 %unused) {
  %first = extractvalue {i32, i16} %in, 0
  %bar = call i32 @fun(i32 %first)
  ret i32 %bar
}

; CHECK: [[GOO]] = OpFunction [[I32]] None [[FN3]]
; CHECK-NEXT: [[A:%.+]] = OpFunctionParameter [[I32]]
; CHECK-NEXT: [[B:%.+]] = OpFunctionParameter [[I16]]
; CHECK-NEXT: [[C:%.+]] = OpFunctionParameter [[I64]]
; CHECK: [[AGG1:%.+]] = OpCompositeInsert [[PAIR]] [[A]] [[UNDEF]] 0
; CHECK: [[AGG2:%.+]] = OpCompositeInsert [[PAIR]] [[B]] [[AGG1]] 1
; CHECK: [[RET:%.+]] = OpFunctionCall [[I32]] [[FOO]] [[AGG2]] [[C]]
; CHECK: OpReturnValue [[RET]]
; CHECK: OpFunctionEnd

; CHECK: [[FOO]] = OpFunction [[I32]] None [[FN2]]
; CHECK-NEXT: [[IN:%.+]] = OpFunctionParameter [[PAIR]]
; CHECK-NEXT: OpFunctionParameter [[I64]]
; CHECK: [[FIRST:%.+]] = OpCompositeExtract [[I32]] [[IN]] 0
; CHECK: [[BAR:%.+]] = OpFunctionCall [[I32]] [[FUN]] [[FIRST]]
; CHECK: OpReturnValue [[BAR]]
; CHECK: OpFunctionEnd

define i32 @goo(i32 %a, i16 %b, i64 %c) {
  %agg1 = insertvalue {i32, i16} undef, i32 %a, 0
  %agg2 = insertvalue {i32, i16} %agg1, i16 %b, 1
  %ret = call i32 @foo({i32, i16} %agg2, i64 %c)
  ret i32 %ret
}

; TODO: test tailcall?
@ -0,0 +1,64 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s

; CHECK-DAG: OpName [[FOOBAR:%.+]] "foobar"
; CHECK-DAG: OpName [[PRODUCER:%.+]] "producer"
; CHECK-DAG: OpName [[CONSUMER:%.+]] "consumer"

; CHECK-NOT: DAG-FENCE

%ty1 = type {i16, i32}
%ty2 = type {%ty1, i64}

; CHECK-DAG: [[I16:%.+]] = OpTypeInt 16
; CHECK-DAG: [[I32:%.+]] = OpTypeInt 32
; CHECK-DAG: [[I64:%.+]] = OpTypeInt 64
; CHECK-DAG: [[TY1:%.+]] = OpTypeStruct [[I16]] [[I32]]
; CHECK-DAG: [[TY2:%.+]] = OpTypeStruct [[TY1]] [[I64]]
; CHECK-DAG: [[UNDEF_I16:%.+]] = OpUndef [[I16]]
; CHECK-DAG: [[UNDEF_I64:%.+]] = OpUndef [[I64]]
; CHECK-DAG: [[UNDEF_TY2:%.+]] = OpUndef [[TY2]]
; CHECK-DAG: [[CST_42:%.+]] = OpConstant [[I32]] 42

; CHECK-NOT: DAG-FENCE

define i32 @foobar() {
  %agg = call %ty2 @producer(i16 undef, i32 42, i64 undef)
  %ret = call i32 @consumer(%ty2 %agg)
  ret i32 %ret
}

; CHECK: [[FOOBAR]] = OpFunction
; CHECK: [[AGG:%.+]] = OpFunctionCall [[TY2]] [[PRODUCER]] [[UNDEF_I16]] [[CST_42]] [[UNDEF_I64]]
; CHECK: [[RET:%.+]] = OpFunctionCall [[I32]] [[CONSUMER]] [[AGG]]
; CHECK: OpReturnValue [[RET]]
; CHECK: OpFunctionEnd


define %ty2 @producer(i16 %a, i32 %b, i64 %c) {
  %agg1 = insertvalue %ty2 undef, i16 %a, 0, 0
  %agg2 = insertvalue %ty2 %agg1, i32 %b, 0, 1
  %agg3 = insertvalue %ty2 %agg2, i64 %c, 1
  ret %ty2 %agg3
}

; CHECK: [[PRODUCER]] = OpFunction
; CHECK: [[A:%.+]] = OpFunctionParameter [[I16]]
; CHECK: [[B:%.+]] = OpFunctionParameter [[I32]]
; CHECK: [[C:%.+]] = OpFunctionParameter [[I64]]
; CHECK: [[AGG1:%.+]] = OpCompositeInsert [[TY2]] [[A]] [[UNDEF_TY2]] 0 0
; CHECK: [[AGG2:%.+]] = OpCompositeInsert [[TY2]] [[B]] [[AGG1]] 0 1
; CHECK: [[AGG3:%.+]] = OpCompositeInsert [[TY2]] [[C]] [[AGG2]] 1
; CHECK: OpReturnValue [[AGG3]]
; CHECK: OpFunctionEnd


define i32 @consumer(%ty2 %agg) {
  %ret = extractvalue %ty2 %agg, 0, 1
  ret i32 %ret
}

; CHECK: [[CONSUMER]] = OpFunction
; CHECK: [[AGG:%.+]] = OpFunctionParameter [[TY2]]
; CHECK: [[RET:%.+]] = OpCompositeExtract [[I32]] [[AGG]] 0 1
; CHECK: OpReturnValue [[RET]]
; CHECK: OpFunctionEnd
@ -0,0 +1,91 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s

; CHECK-DAG: OpName [[SCALAR_ADD:%.+]] "scalar_add"
; CHECK-DAG: OpName [[SCALAR_SUB:%.+]] "scalar_sub"
; CHECK-DAG: OpName [[SCALAR_MUL:%.+]] "scalar_mul"
; CHECK-DAG: OpName [[SCALAR_UDIV:%.+]] "scalar_udiv"
; CHECK-DAG: OpName [[SCALAR_SDIV:%.+]] "scalar_sdiv"
; TODO: add tests for urem + srem
; TODO: add test for OpSNegate

; CHECK-NOT: DAG-FENCE

; CHECK-DAG: [[SCALAR:%.+]] = OpTypeInt 32
; CHECK-DAG: [[SCALAR_FN:%.+]] = OpTypeFunction [[SCALAR]] [[SCALAR]] [[SCALAR]]

; CHECK-NOT: DAG-FENCE


; Test add on scalar:
define i32 @scalar_add(i32 %a, i32 %b) {
  %c = add i32 %a, %b
  ret i32 %c
}

; CHECK: [[SCALAR_ADD]] = OpFunction [[SCALAR]] None [[SCALAR_FN]]
; CHECK-NEXT: [[A:%.+]] = OpFunctionParameter [[SCALAR]]
; CHECK-NEXT: [[B:%.+]] = OpFunctionParameter [[SCALAR]]
; CHECK: OpLabel
; CHECK: [[C:%.+]] = OpIAdd [[SCALAR]] [[A]] [[B]]
; CHECK: OpReturnValue [[C]]
; CHECK-NEXT: OpFunctionEnd


; Test sub on scalar:
define i32 @scalar_sub(i32 %a, i32 %b) {
  %c = sub i32 %a, %b
  ret i32 %c
}

; CHECK: [[SCALAR_SUB]] = OpFunction [[SCALAR]] None [[SCALAR_FN]]
; CHECK-NEXT: [[A:%.+]] = OpFunctionParameter [[SCALAR]]
; CHECK-NEXT: [[B:%.+]] = OpFunctionParameter [[SCALAR]]
; CHECK: OpLabel
; CHECK: [[C:%.+]] = OpISub [[SCALAR]] [[A]] [[B]]
; CHECK: OpReturnValue [[C]]
; CHECK-NEXT: OpFunctionEnd


; Test mul on scalar:
define i32 @scalar_mul(i32 %a, i32 %b) {
  %c = mul i32 %a, %b
  ret i32 %c
}

; CHECK: [[SCALAR_MUL]] = OpFunction [[SCALAR]] None [[SCALAR_FN]]
; CHECK-NEXT: [[A:%.+]] = OpFunctionParameter [[SCALAR]]
; CHECK-NEXT: [[B:%.+]] = OpFunctionParameter [[SCALAR]]
; CHECK: OpLabel
; CHECK: [[C:%.+]] = OpIMul [[SCALAR]] [[A]] [[B]]
; CHECK: OpReturnValue [[C]]
; CHECK-NEXT: OpFunctionEnd


; Test udiv on scalar:
define i32 @scalar_udiv(i32 %a, i32 %b) {
  %c = udiv i32 %a, %b
  ret i32 %c
}

; CHECK: [[SCALAR_UDIV]] = OpFunction [[SCALAR]] None [[SCALAR_FN]]
; CHECK-NEXT: [[A:%.+]] = OpFunctionParameter [[SCALAR]]
; CHECK-NEXT: [[B:%.+]] = OpFunctionParameter [[SCALAR]]
; CHECK: OpLabel
; CHECK: [[C:%.+]] = OpUDiv [[SCALAR]] [[A]] [[B]]
; CHECK: OpReturnValue [[C]]
; CHECK-NEXT: OpFunctionEnd


; Test sdiv on scalar:
define i32 @scalar_sdiv(i32 %a, i32 %b) {
  %c = sdiv i32 %a, %b
  ret i32 %c
}

; CHECK: [[SCALAR_SDIV]] = OpFunction [[SCALAR]] None [[SCALAR_FN]]
; CHECK-NEXT: [[A:%.+]] = OpFunctionParameter [[SCALAR]]
; CHECK-NEXT: [[B:%.+]] = OpFunctionParameter [[SCALAR]]
; CHECK: OpLabel
; CHECK: [[C:%.+]] = OpSDiv [[SCALAR]] [[A]] [[B]]
; CHECK: OpReturnValue [[C]]
; CHECK-NEXT: OpFunctionEnd
@ -0,0 +1,44 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s

; CHECK-DAG: OpName [[SCALARi32:%.+]] "select_i32"
; CHECK-DAG: OpName [[VEC2i32:%.+]] "select_i32v2"
; CHECK-DAG: OpName [[VEC2i32v2:%.+]] "select_v2i32v2"

; CHECK: [[SCALARi32]] = OpFunction
; CHECK-NEXT: [[C:%.+]] = OpFunctionParameter
; CHECK-NEXT: [[T:%.+]] = OpFunctionParameter
; CHECK-NEXT: [[F:%.+]] = OpFunctionParameter
; CHECK: OpLabel
; CHECK: [[R:%.+]] = OpSelect {{%.+}} [[C]] [[T]] [[F]]
; CHECK: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define i32 @select_i32(i1 %c, i32 %t, i32 %f) {
  %r = select i1 %c, i32 %t, i32 %f
  ret i32 %r
}

; CHECK: [[VEC2i32]] = OpFunction
; CHECK-NEXT: [[C:%.+]] = OpFunctionParameter
; CHECK-NEXT: [[T:%.+]] = OpFunctionParameter
; CHECK-NEXT: [[F:%.+]] = OpFunctionParameter
; CHECK: OpLabel
; CHECK: [[R:%.+]] = OpSelect {{%.+}} [[C]] [[T]] [[F]]
; CHECK: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define <2 x i32> @select_i32v2(i1 %c, <2 x i32> %t, <2 x i32> %f) {
  %r = select i1 %c, <2 x i32> %t, <2 x i32> %f
  ret <2 x i32> %r
}

; CHECK: [[VEC2i32v2]] = OpFunction
; CHECK-NEXT: [[C:%.+]] = OpFunctionParameter
; CHECK-NEXT: [[T:%.+]] = OpFunctionParameter
; CHECK-NEXT: [[F:%.+]] = OpFunctionParameter
; CHECK: OpLabel
; CHECK: [[R:%.+]] = OpSelect {{%.+}} [[C]] [[T]] [[F]]
; CHECK: OpReturnValue [[R]]
; CHECK-NEXT: OpFunctionEnd
define <2 x i32> @select_v2i32v2(<2 x i1> %c, <2 x i32> %t, <2 x i32> %f) {
  %r = select <2 x i1> %c, <2 x i32> %t, <2 x i32> %f
  ret <2 x i32> %r
}
@ -0,0 +1,107 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s

; CHECK-DAG: OpName [[VECTOR_FNEG:%.+]] "vector_fneg"
; CHECK-DAG: OpName [[VECTOR_FADD:%.+]] "vector_fadd"
; CHECK-DAG: OpName [[VECTOR_FSUB:%.+]] "vector_fsub"
; CHECK-DAG: OpName [[VECTOR_FMUL:%.+]] "vector_fmul"
; CHECK-DAG: OpName [[VECTOR_FDIV:%.+]] "vector_fdiv"
; CHECK-DAG: OpName [[VECTOR_FREM:%.+]] "vector_frem"
; TODO: add test for OpFMod

; CHECK-NOT: DAG-FENCE

; CHECK-DAG: [[FP16:%.+]] = OpTypeFloat 16
; CHECK-DAG: [[VECTOR:%.+]] = OpTypeVector [[FP16]]
; CHECK-DAG: [[VECTOR_FN:%.+]] = OpTypeFunction [[VECTOR]] [[VECTOR]] [[VECTOR]]

; CHECK-NOT: DAG-FENCE


; Test fneg on vector:
define <2 x half> @vector_fneg(<2 x half> %a, <2 x half> %unused) {
  %c = fneg <2 x half> %a
  ret <2 x half> %c
}

; CHECK: [[VECTOR_FNEG]] = OpFunction [[VECTOR]] None [[VECTOR_FN]]
; CHECK-NEXT: [[A:%.+]] = OpFunctionParameter [[VECTOR]]
; CHECK-NEXT: [[B:%.+]] = OpFunctionParameter [[VECTOR]]
; CHECK: OpLabel
; CHECK: [[C:%.+]] = OpFNegate [[VECTOR]] [[A]]
; CHECK: OpReturnValue [[C]]
; CHECK-NEXT: OpFunctionEnd


; Test fadd on vector:
define <2 x half> @vector_fadd(<2 x half> %a, <2 x half> %b) {
  %c = fadd <2 x half> %a, %b
  ret <2 x half> %c
}

; CHECK: [[VECTOR_FADD]] = OpFunction [[VECTOR]] None [[VECTOR_FN]]
; CHECK-NEXT: [[A:%.+]] = OpFunctionParameter [[VECTOR]]
; CHECK-NEXT: [[B:%.+]] = OpFunctionParameter [[VECTOR]]
; CHECK: OpLabel
; CHECK: [[C:%.+]] = OpFAdd [[VECTOR]] [[A]] [[B]]
; CHECK: OpReturnValue [[C]]
; CHECK-NEXT: OpFunctionEnd


; Test fsub on vector:
define <2 x half> @vector_fsub(<2 x half> %a, <2 x half> %b) {
  %c = fsub <2 x half> %a, %b
  ret <2 x half> %c
}

; CHECK: [[VECTOR_FSUB]] = OpFunction [[VECTOR]] None [[VECTOR_FN]]
; CHECK-NEXT: [[A:%.+]] = OpFunctionParameter [[VECTOR]]
; CHECK-NEXT: [[B:%.+]] = OpFunctionParameter [[VECTOR]]
; CHECK: OpLabel
; CHECK: [[C:%.+]] = OpFSub [[VECTOR]] [[A]] [[B]]
; CHECK: OpReturnValue [[C]]
; CHECK-NEXT: OpFunctionEnd


; Test fmul on vector:
define <2 x half> @vector_fmul(<2 x half> %a, <2 x half> %b) {
  %c = fmul <2 x half> %a, %b
  ret <2 x half> %c
}

; CHECK: [[VECTOR_FMUL]] = OpFunction [[VECTOR]] None [[VECTOR_FN]]
; CHECK-NEXT: [[A:%.+]] = OpFunctionParameter [[VECTOR]]
; CHECK-NEXT: [[B:%.+]] = OpFunctionParameter [[VECTOR]]
; CHECK: OpLabel
; CHECK: [[C:%.+]] = OpFMul [[VECTOR]] [[A]] [[B]]
; CHECK: OpReturnValue [[C]]
; CHECK-NEXT: OpFunctionEnd


; Test fdiv on vector:
define <2 x half> @vector_fdiv(<2 x half> %a, <2 x half> %b) {
  %c = fdiv <2 x half> %a, %b
  ret <2 x half> %c
}

; CHECK: [[VECTOR_FDIV]] = OpFunction [[VECTOR]] None [[VECTOR_FN]]
; CHECK-NEXT: [[A:%.+]] = OpFunctionParameter [[VECTOR]]
; CHECK-NEXT: [[B:%.+]] = OpFunctionParameter [[VECTOR]]
; CHECK: OpLabel
; CHECK: [[C:%.+]] = OpFDiv [[VECTOR]] [[A]] [[B]]
; CHECK: OpReturnValue [[C]]
; CHECK-NEXT: OpFunctionEnd


; Test frem on vector:
define <2 x half> @vector_frem(<2 x half> %a, <2 x half> %b) {
  %c = frem <2 x half> %a, %b
  ret <2 x half> %c
}

; CHECK: [[VECTOR_FREM]] = OpFunction [[VECTOR]] None [[VECTOR_FN]]
; CHECK-NEXT: [[A:%.+]] = OpFunctionParameter [[VECTOR]]
; CHECK-NEXT: [[B:%.+]] = OpFunctionParameter [[VECTOR]]
; CHECK: OpLabel
; CHECK: [[C:%.+]] = OpFRem [[VECTOR]] [[A]] [[B]]
; CHECK: OpReturnValue [[C]]
; CHECK-NEXT: OpFunctionEnd
@ -0,0 +1,92 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s

; CHECK-DAG: OpName [[VECTOR_ADD:%.+]] "vector_add"
; CHECK-DAG: OpName [[VECTOR_SUB:%.+]] "vector_sub"
; CHECK-DAG: OpName [[VECTOR_MUL:%.+]] "vector_mul"
; CHECK-DAG: OpName [[VECTOR_UDIV:%.+]] "vector_udiv"
; CHECK-DAG: OpName [[VECTOR_SDIV:%.+]] "vector_sdiv"
; TODO: add tests for urem + srem
; TODO: add test for OpSNegate

; CHECK-NOT: DAG-FENCE

; CHECK-DAG: [[I16:%.+]] = OpTypeInt 16
; CHECK-DAG: [[VECTOR:%.+]] = OpTypeVector [[I16]]
; CHECK-DAG: [[VECTOR_FN:%.+]] = OpTypeFunction [[VECTOR]] [[VECTOR]] [[VECTOR]]

; CHECK-NOT: DAG-FENCE


; Test add on vector:
define <2 x i16> @vector_add(<2 x i16> %a, <2 x i16> %b) {
  %c = add <2 x i16> %a, %b
  ret <2 x i16> %c
}

; CHECK: [[VECTOR_ADD]] = OpFunction [[VECTOR]] None [[VECTOR_FN]]
; CHECK-NEXT: [[A:%.+]] = OpFunctionParameter [[VECTOR]]
; CHECK-NEXT: [[B:%.+]] = OpFunctionParameter [[VECTOR]]
; CHECK: OpLabel
; CHECK: [[C:%.+]] = OpIAdd [[VECTOR]] [[A]] [[B]]
; CHECK: OpReturnValue [[C]]
; CHECK-NEXT: OpFunctionEnd


; Test sub on vector:
define <2 x i16> @vector_sub(<2 x i16> %a, <2 x i16> %b) {
  %c = sub <2 x i16> %a, %b
  ret <2 x i16> %c
}

; CHECK: [[VECTOR_SUB]] = OpFunction [[VECTOR]] None [[VECTOR_FN]]
; CHECK-NEXT: [[A:%.+]] = OpFunctionParameter [[VECTOR]]
; CHECK-NEXT: [[B:%.+]] = OpFunctionParameter [[VECTOR]]
; CHECK: OpLabel
; CHECK: [[C:%.+]] = OpISub [[VECTOR]] [[A]] [[B]]
; CHECK: OpReturnValue [[C]]
; CHECK-NEXT: OpFunctionEnd


; Test mul on vector:
define <2 x i16> @vector_mul(<2 x i16> %a, <2 x i16> %b) {
  %c = mul <2 x i16> %a, %b
  ret <2 x i16> %c
}

; CHECK: [[VECTOR_MUL]] = OpFunction [[VECTOR]] None [[VECTOR_FN]]
; CHECK-NEXT: [[A:%.+]] = OpFunctionParameter [[VECTOR]]
; CHECK-NEXT: [[B:%.+]] = OpFunctionParameter [[VECTOR]]
; CHECK: OpLabel
; CHECK: [[C:%.+]] = OpIMul [[VECTOR]] [[A]] [[B]]
; CHECK: OpReturnValue [[C]]
; CHECK-NEXT: OpFunctionEnd


; Test udiv on vector:
define <2 x i16> @vector_udiv(<2 x i16> %a, <2 x i16> %b) {
  %c = udiv <2 x i16> %a, %b
  ret <2 x i16> %c
}

; CHECK: [[VECTOR_UDIV]] = OpFunction [[VECTOR]] None [[VECTOR_FN]]
; CHECK-NEXT: [[A:%.+]] = OpFunctionParameter [[VECTOR]]
; CHECK-NEXT: [[B:%.+]] = OpFunctionParameter [[VECTOR]]
; CHECK: OpLabel
; CHECK: [[C:%.+]] = OpUDiv [[VECTOR]] [[A]] [[B]]
; CHECK: OpReturnValue [[C]]
; CHECK-NEXT: OpFunctionEnd


; Test sdiv on vector:
define <2 x i16> @vector_sdiv(<2 x i16> %a, <2 x i16> %b) {
  %c = sdiv <2 x i16> %a, %b
  ret <2 x i16> %c
}

; CHECK: [[VECTOR_SDIV]] = OpFunction [[VECTOR]] None [[VECTOR_FN]]
; CHECK-NEXT: [[A:%.+]] = OpFunctionParameter [[VECTOR]]
; CHECK-NEXT: [[B:%.+]] = OpFunctionParameter [[VECTOR]]
; CHECK: OpLabel
; CHECK: [[C:%.+]] = OpSDiv [[VECTOR]] [[A]] [[B]]
; CHECK: OpReturnValue [[C]]
; CHECK-NEXT: OpFunctionEnd
@ -0,0 +1,94 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV

target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024"

; CHECK-SPIRV: OpName %[[#NAME_FSHL_FUNC_32:]] "spirv.llvm_fshl_i32"
; CHECK-SPIRV: OpName %[[#NAME_FSHL_FUNC_16:]] "spirv.llvm_fshl_i16"
; CHECK-SPIRV: OpName %[[#NAME_FSHL_FUNC_VEC_INT_16:]] "spirv.llvm_fshl_v2i16"
; CHECK-SPIRV: %[[#TYPE_INT_32:]] = OpTypeInt 32 0
; CHECK-SPIRV: %[[#TYPE_ORIG_FUNC_32:]] = OpTypeFunction %[[#TYPE_INT_32]] %[[#TYPE_INT_32]] %[[#TYPE_INT_32]]
; CHECK-SPIRV: %[[#TYPE_INT_16:]] = OpTypeInt 16 0
; CHECK-SPIRV: %[[#TYPE_ORIG_FUNC_16:]] = OpTypeFunction %[[#TYPE_INT_16]] %[[#TYPE_INT_16]] %[[#TYPE_INT_16]]
; CHECK-SPIRV: %[[#TYPE_VEC_INT_16:]] = OpTypeVector %[[#TYPE_INT_16]] 2
; CHECK-SPIRV: %[[#TYPE_ORIG_FUNC_VEC_INT_16:]] = OpTypeFunction %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]]
; CHECK-SPIRV: %[[#TYPE_FSHL_FUNC_32:]] = OpTypeFunction %[[#TYPE_INT_32]] %[[#TYPE_INT_32]] %[[#TYPE_INT_32]] %[[#TYPE_INT_32]]
; CHECK-SPIRV: %[[#TYPE_FSHL_FUNC_16:]] = OpTypeFunction %[[#TYPE_INT_16]] %[[#TYPE_INT_16]] %[[#TYPE_INT_16]] %[[#TYPE_INT_16]]
; CHECK-SPIRV: %[[#TYPE_FSHL_FUNC_VEC_INT_16:]] = OpTypeFunction %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]]
; CHECK-SPIRV-DAG: %[[#CONST_ROTATE_32:]] = OpConstant %[[#TYPE_INT_32]] 8
; CHECK-SPIRV-DAG: %[[#CONST_ROTATE_16:]] = OpConstant %[[#TYPE_INT_16]] 8
; CHECK-SPIRV: %[[#CONST_ROTATE_VEC_INT_16:]] = OpConstantComposite %[[#TYPE_VEC_INT_16]] %[[#CONST_ROTATE_16]] %[[#CONST_ROTATE_16]]
; CHECK-SPIRV-DAG: %[[#CONST_TYPE_SIZE_32:]] = OpConstant %[[#TYPE_INT_32]] 32

; Function Attrs: nounwind readnone
; CHECK-SPIRV: %[[#]] = OpFunction %[[#TYPE_INT_32]] {{.*}} %[[#TYPE_ORIG_FUNC_32]]
; CHECK-SPIRV: %[[#X:]] = OpFunctionParameter %[[#TYPE_INT_32]]
; CHECK-SPIRV: %[[#Y:]] = OpFunctionParameter %[[#TYPE_INT_32]]
define spir_func i32 @Test_i32(i32 %x, i32 %y) local_unnamed_addr #0 {
entry:
; CHECK-SPIRV: %[[#CALL_32_X_Y:]] = OpFunctionCall %[[#TYPE_INT_32]] %[[#NAME_FSHL_FUNC_32]] %[[#X]] %[[#Y]] %[[#CONST_ROTATE_32]]
  %0 = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 8)
; CHECK-SPIRV: %[[#CALL_32_Y_X:]] = OpFunctionCall %[[#TYPE_INT_32]] %[[#NAME_FSHL_FUNC_32]] %[[#Y]] %[[#X]] %[[#CONST_ROTATE_32]]
  %1 = call i32 @llvm.fshl.i32(i32 %y, i32 %x, i32 8)
; CHECK-SPIRV: %[[#ADD_32:]] = OpIAdd %[[#TYPE_INT_32]] %[[#CALL_32_X_Y]] %[[#CALL_32_Y_X]]
  %sum = add i32 %0, %1
; CHECK-SPIRV: OpReturnValue %[[#ADD_32]]
  ret i32 %sum
}

; Function Attrs: nounwind readnone
; CHECK-SPIRV: %[[#]] = OpFunction %[[#TYPE_INT_16]] {{.*}} %[[#TYPE_ORIG_FUNC_16]]
; CHECK-SPIRV: %[[#X:]] = OpFunctionParameter %[[#TYPE_INT_16]]
; CHECK-SPIRV: %[[#Y:]] = OpFunctionParameter %[[#TYPE_INT_16]]
define spir_func i16 @Test_i16(i16 %x, i16 %y) local_unnamed_addr #0 {
entry:
; CHECK-SPIRV: %[[#CALL_16:]] = OpFunctionCall %[[#TYPE_INT_16]] %[[#NAME_FSHL_FUNC_16]] %[[#X]] %[[#Y]] %[[#CONST_ROTATE_16]]
  %0 = call i16 @llvm.fshl.i16(i16 %x, i16 %y, i16 8)
; CHECK-SPIRV: OpReturnValue %[[#CALL_16]]
  ret i16 %0
}

; CHECK-SPIRV: %[[#]] = OpFunction %[[#TYPE_VEC_INT_16]] {{.*}} %[[#TYPE_ORIG_FUNC_VEC_INT_16]]
; CHECK-SPIRV: %[[#X:]] = OpFunctionParameter %[[#TYPE_VEC_INT_16]]
; CHECK-SPIRV: %[[#Y:]] = OpFunctionParameter %[[#TYPE_VEC_INT_16]]
define spir_func <2 x i16> @Test_v2i16(<2 x i16> %x, <2 x i16> %y) local_unnamed_addr #0 {
entry:
; CHECK-SPIRV: %[[#CALL_VEC_INT_16:]] = OpFunctionCall %[[#TYPE_VEC_INT_16]] %[[#NAME_FSHL_FUNC_VEC_INT_16]] %[[#X]] %[[#Y]] %[[#CONST_ROTATE_VEC_INT_16]]
  %0 = call <2 x i16> @llvm.fshl.v2i16(<2 x i16> %x, <2 x i16> %y, <2 x i16> <i16 8, i16 8>)
; CHECK-SPIRV: OpReturnValue %[[#CALL_VEC_INT_16]]
  ret <2 x i16> %0
}

; CHECK-SPIRV: %[[#NAME_FSHL_FUNC_32]] = OpFunction %[[#TYPE_INT_32]] {{.*}} %[[#TYPE_FSHL_FUNC_32]]
; CHECK-SPIRV: %[[#X_FSHL:]] = OpFunctionParameter %[[#TYPE_INT_32]]
; CHECK-SPIRV: %[[#Y_FSHL:]] = OpFunctionParameter %[[#TYPE_INT_32]]
; CHECK-SPIRV: %[[#ROT:]] = OpFunctionParameter %[[#TYPE_INT_32]]

; CHECK-SPIRV: %[[#ROTATE_MOD_SIZE:]] = OpUMod %[[#TYPE_INT_32]] %[[#ROT]] %[[#CONST_TYPE_SIZE_32]]
; CHECK-SPIRV: %[[#X_SHIFT_LEFT:]] = OpShiftLeftLogical %[[#TYPE_INT_32]] %[[#X_FSHL]] %[[#ROTATE_MOD_SIZE]]
; CHECK-SPIRV: %[[#NEG_ROTATE:]] = OpISub %[[#TYPE_INT_32]] %[[#CONST_TYPE_SIZE_32]] %[[#ROTATE_MOD_SIZE]]
; CHECK-SPIRV: %[[#Y_SHIFT_RIGHT:]] = OpShiftRightLogical %[[#TYPE_INT_32]] %[[#Y_FSHL]] %[[#NEG_ROTATE]]
; CHECK-SPIRV: %[[#FSHL_RESULT:]] = OpBitwiseOr %[[#TYPE_INT_32]] %[[#X_SHIFT_LEFT]] %[[#Y_SHIFT_RIGHT]]
; CHECK-SPIRV: OpReturnValue %[[#FSHL_RESULT]]

; Just check that the function for i16 was generated as such - we've checked the logic for another type.
; CHECK-SPIRV: %[[#NAME_FSHL_FUNC_16]] = OpFunction %[[#TYPE_INT_16]] {{.*}} %[[#TYPE_FSHL_FUNC_16]]
; CHECK-SPIRV: %[[#X_FSHL:]] = OpFunctionParameter %[[#TYPE_INT_16]]
; CHECK-SPIRV: %[[#Y_FSHL:]] = OpFunctionParameter %[[#TYPE_INT_16]]
; CHECK-SPIRV: %[[#ROT:]] = OpFunctionParameter %[[#TYPE_INT_16]]

; Just check that the function for v2i16 was generated as such - we've checked the logic for another type.
; CHECK-SPIRV: %[[#NAME_FSHL_FUNC_VEC_INT_16]] = OpFunction %[[#TYPE_VEC_INT_16]] {{.*}} %[[#TYPE_FSHL_FUNC_VEC_INT_16]]
; CHECK-SPIRV: %[[#X_FSHL:]] = OpFunctionParameter %[[#TYPE_VEC_INT_16]]
; CHECK-SPIRV: %[[#Y_FSHL:]] = OpFunctionParameter %[[#TYPE_VEC_INT_16]]
; CHECK-SPIRV: %[[#ROT:]] = OpFunctionParameter %[[#TYPE_VEC_INT_16]]

; Function Attrs: nounwind readnone speculatable willreturn
declare i32 @llvm.fshl.i32(i32, i32, i32) #1

; Function Attrs: nounwind readnone speculatable willreturn
declare i16 @llvm.fshl.i16(i16, i16, i16) #1

; Function Attrs: nounwind readnone speculatable willreturn
declare <2 x i16> @llvm.fshl.v2i16(<2 x i16>, <2 x i16>, <2 x i16>) #1

attributes #1 = { nounwind readnone speculatable willreturn }
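To make the checks above easier to follow, here is a hedged LLVM IR sketch of the generated spirv.llvm_fshl_i32 helper, reconstructed from the CHECK lines; the parameter names are made up, and the actual body is emitted by lowerFunnelShifts in SPIRVPrepareFunctions.cpp rather than written as IR.

define i32 @spirv.llvm_fshl_i32(i32 %x, i32 %y, i32 %rot) {
entry:
  %rotmod = urem i32 %rot, 32   ; OpUMod against the type-size constant 32
  %hi = shl i32 %x, %rotmod     ; OpShiftLeftLogical
  %inv = sub i32 32, %rotmod    ; OpISub
  %lo = lshr i32 %y, %inv       ; OpShiftRightLogical
  %res = or i32 %hi, %lo        ; OpBitwiseOr
  ret i32 %res
}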
@ -1,29 +1,20 @@
; RUN: split-file %s %t
; RUN: llc -O0 %t/metadata-opencl12.ll -o - | FileCheck %t/metadata-opencl12.ll
; RUN: llc -O0 %t/metadata-opencl20.ll -o - | FileCheck %t/metadata-opencl20.ll
; RUN: llc -O0 %t/metadata-opencl22.ll -o - | FileCheck %t/metadata-opencl22.ll
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %t/metadata-opencl12.ll -o - | FileCheck %t/metadata-opencl12.ll
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %t/metadata-opencl20.ll -o - | FileCheck %t/metadata-opencl20.ll
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %t/metadata-opencl22.ll -o - | FileCheck %t/metadata-opencl22.ll

;--- metadata-opencl12.ll
target triple = "spirv32-unknown-unknown"

!opencl.ocl.version = !{!0}
!0 = !{i32 1, i32 2}

; We assume the SPIR-V 2.2 environment spec's version format: 0|Maj|Min|Rev|
; CHECK: OpSource OpenCL_C 66048
; CHECK: OpSource OpenCL_C 102000
;--- metadata-opencl20.ll
target triple = "spirv32-unknown-unknown"

!opencl.ocl.version = !{!0}
!0 = !{i32 2, i32 0}

; We assume the SPIR-V 2.2 environment spec's version format: 0|Maj|Min|Rev|
; CHECK: OpSource OpenCL_C 131072
; CHECK: OpSource OpenCL_C 200000
;--- metadata-opencl22.ll
target triple = "spirv32-unknown-unknown"

!opencl.ocl.version = !{!0}
!0 = !{i32 2, i32 2}

; We assume the SPIR-V 2.2 environment spec's version format: 0|Maj|Min|Rev|
; CHECK: OpSource OpenCL_C 131584
; CHECK: OpSource OpenCL_C 202000
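The updated OpSource literals follow a decimal encoding of the OpenCL C version, major * 100000 + minor * 1000: 1 * 100000 + 2 * 1000 = 102000, 2 * 100000 = 200000, and 2 * 100000 + 2 * 1000 = 202000. The previous expectations were the byte-packed 0|Maj|Min|Rev values described by the in-test comment: 0x010200 = 66048, 0x020000 = 131072, 0x020200 = 131584.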
@ -0,0 +1,21 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK

; CHECK: %[[#Int8Ty:]] = OpTypeInt 8 0
; CHECK: %[[#PtrTy:]] = OpTypePointer Function %[[#Int8Ty]]
; CHECK: %[[#Int64Ty:]] = OpTypeInt 64 0
; CHECK: %[[#FTy:]] = OpTypeFunction %[[#Int64Ty]] %[[#PtrTy]]
; CHECK: %[[#Int32Ty:]] = OpTypeInt 32 0
; CHECK: %[[#Const:]] = OpConstant %[[#Int32Ty]] 0
; CHECK: OpFunction %[[#Int64Ty]] None %[[#FTy]]
; CHECK: %[[#Parm:]] = OpFunctionParameter %[[#PtrTy]]
; CHECK: OpStore %[[#Parm]] %[[#Const]] Aligned 4
; CHECK: %[[#Res:]] = OpLoad %[[#Int64Ty]] %[[#Parm]] Aligned 8
; CHECK: OpReturnValue %[[#Res]]

target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64"

define i64 @test(ptr %p) {
  store i32 0, ptr %p
  %v = load i64, ptr %p
  ret i64 %v
}