//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "SPIRV.h"
|
|
#include "SPIRVGlobalRegistry.h"
|
|
#include "SPIRVInstrInfo.h"
|
|
#include "SPIRVRegisterBankInfo.h"
|
|
#include "SPIRVRegisterInfo.h"
|
|
#include "SPIRVTargetMachine.h"
|
|
#include "SPIRVUtils.h"
|
|
#include "llvm/ADT/APFloat.h"
|
|
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
|
|
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
|
#include "llvm/Support/Debug.h"
|
|
|
|
#define DEBUG_TYPE "spirv-isel"
|
|
|
|
using namespace llvm;
|
|
|
|
namespace {
|
|
|
|
#define GET_GLOBALISEL_PREDICATE_BITSET
|
|
#include "SPIRVGenGlobalISel.inc"
|
|
#undef GET_GLOBALISEL_PREDICATE_BITSET
|
|
|
|
class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage &CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in "select()".
  // It is basically a large switch/case delegating to all the other select
  // methods.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg,
                       const SPIRVType *intTy, const SPIRVType *boolTy,
                       MachineInstr &I) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage &CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

bool SPIRVInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  const unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
      auto *Def = MRI->getVRegDef(I.getOperand(1).getReg());
      if (isTypeFoldingSupported(Def->getOpcode())) {
        auto Res = selectImpl(I, *CoverageInfo);
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->replaceRegWith(I.getOperand(1).getReg(), I.getOperand(0).getReg());
      I.removeFromParent();
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
    }
    return true;
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting return reg+type, and removing selected instr
  // from parent occurs here. Instr-specific selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(ResVReg, LLT::scalar(32));
    I.removeFromParent();
    return true;
  }
  return false;
}

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  assert(!isTypeFoldingSupported(I.getOpcode()) ||
         I.getOpcode() == TargetOpcode::G_CONSTANT);
  const unsigned Opcode = I.getOpcode();
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  default:
    return false;
  }
}

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}

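// Maps an LLVM atomic ordering to the closest SPIR-V memory semantics value.
// Weaker orderings (Unordered, Monotonic, NotAtomic) impose no ordering
// constraint that SPIR-V can express, so they map to MemorySemantics::None.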
static SPIRV::MemorySemantics getMemSemantics(AtomicOrdering Ord) {
  switch (Ord) {
  case AtomicOrdering::Acquire:
    return SPIRV::MemorySemantics::Acquire;
  case AtomicOrdering::Release:
    return SPIRV::MemorySemantics::Release;
  case AtomicOrdering::AcquireRelease:
    return SPIRV::MemorySemantics::AcquireRelease;
  case AtomicOrdering::SequentiallyConsistent:
    return SPIRV::MemorySemantics::SequentiallyConsistent;
  case AtomicOrdering::Unordered:
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::NotAtomic:
  default:
    return SPIRV::MemorySemantics::None;
  }
}

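// Maps an LLVM synchronization scope to a SPIR-V scope: the single-thread
// scope becomes Invocation and the default system scope becomes Device; other
// (named) scopes are rejected with llvm_unreachable for now.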
static SPIRV::Scope getScope(SyncScope::ID Ord) {
  switch (Ord) {
  case SyncScope::SingleThread:
    return SPIRV::Scope::Invocation;
  case SyncScope::System:
    return SPIRV::Scope::Device;
  default:
    llvm_unreachable("Unsupported synchronization Scope ID.");
  }
}

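// Appends SPIR-V memory operands to MIB from the MachineMemOperand: a single
// bitmask literal (Volatile/Nontemporal/Aligned), followed by the alignment
// value as an extra literal when the Aligned bit is set. For example, a
// volatile 4-byte-aligned access adds the immediates Volatile|Aligned and 4.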
static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}

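// Selects G_LOAD into OpLoad. The same routine also serves load-like
// intrinsic instructions (G_INTRINSIC_W_SIDE_EFFECTS), where every operand is
// shifted by one because of the leading intrinsic ID; in that form the memory
// operand flags come from an immediate rather than a MachineMemOperand.
// selectStore below mirrors the same structure for OpStore.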
bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset =
      I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset =
      I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

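// Selects G_MEMCPY/G_MEMMOVE into OpCopyMemorySized. If the register defined
// by the built instruction differs from the expected result register, an
// extra COPY is emitted to connect them.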
bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addDef(I.getOperand(0).getReg())
                 .addUse(I.getOperand(1).getReg())
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg()) {
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  }
  return Result;
}

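// Selects a G_ATOMICRMW_* instruction into the given SPIR-V atomic opcode.
// The scope and memory-semantics operands are SPIR-V <id>s, so they are
// materialized as i32 constants before the atomic instruction is built.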
bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
  // auto ScSem =
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Ptr)
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

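// Selects G_FENCE into OpMemoryBarrier, materializing the memory-semantics
// and scope operands as i32 constants from the fence's atomic-ordering and
// synchronization-scope immediates.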
bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}

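// Selects G_ATOMIC_CMPXCHG into OpAtomicCompareExchange. The "equal" and
// "unequal" semantics operands are derived from the success and failure
// orderings, each combined with the semantics implied by the pointer's
// storage class; the constant is reused when both values coincide.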
bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(2).getReg();
  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();

  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  SPIRV::StorageClass SC = GR.getPointerStorageClass(Ptr);
  uint32_t ScSem = static_cast<uint32_t>(getMemSemanticsForStorageClass(SC));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
  Register MemSemEqReg = buildI32Constant(MemSemEq, I);
  AtomicOrdering FO = MemOp->getFailureOrdering();
  uint32_t MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
  Register MemSemNeqReg =
      MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  const DebugLoc &DL = I.getDebugLoc();
  return BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(SpvValTy))
      .addUse(Ptr)
      .addUse(ScopeReg)
      .addUse(MemSemEqReg)
      .addUse(MemSemNeqReg)
      .addUse(Val)
      .addUse(Cmp)
      .constrainAllUses(TII, TRI, RBI);
}

static bool isGenericCastablePtr(SPIRV::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

// In SPIR-V address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        SrcPtrTy, I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }
  // TODO Should this case just be disallowed completely?
  // We're casting 2 other arbitrary address spaces, so have to bitcast.
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

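// The next few helpers map an IR comparison predicate to the matching SPIR-V
// compare opcode: ordered/unordered float compares, integer compares, pointer
// equality (OpPtrEqual/OpPtrNotEqual), and logical compares for booleans.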
static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}

bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // TODO: only const case is supported for now.
  assert(std::all_of(
      I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
        if (MO.isDef())
          return true;
        if (!MO.isReg())
          return false;
        SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
        assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
               ConstTy->getOperand(1).isReg());
        Register ConstReg = ConstTy->getOperand(1).getReg();
        const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
        assert(Const);
        return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
                Const->getOpcode() == TargetOpcode::G_FCONSTANT);
      }));

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpConstantComposite))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
                                         const SPIRVType *ResType,
                                         unsigned CmpOpc,
                                         MachineInstr &I) const {
  Register Cmp0 = I.getOperand(2).getReg();
  Register Cmp1 = I.getOperand(3).getReg();
  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
             GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
         "CMP operands should have the same type");
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Cmp0)
      .addUse(Cmp1)
      .constrainAllUses(TII, TRI, RBI);
}

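// Selects G_ICMP, dispatching on the operand type: pointer operands use the
// OpPtr* comparisons, boolean operands use the OpLogical* comparisons, and
// everything else uses the plain integer compare opcodes.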
bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  auto Pred = I.getOperand(1).getPredicate();
  unsigned CmpOpc;

  Register CmpOperand = I.getOperand(2).getReg();
  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
    CmpOpc = getPtrCmpOpcode(Pred);
  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
    CmpOpc = getBoolCmpOpcode(Pred);
  else
    CmpOpc = getICmpOpcode(Pred);
  return selectCmp(ResVReg, ResType, CmpOpc, I);
}

void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
                                            const MachineInstr &I,
                                            int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
         "Expected G_FCONSTANT");
  const ConstantFP *FPImm = I.getOperand(1).getFPImm();
  addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
}

void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
                                           const MachineInstr &I,
                                           int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
}

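// Materializes a 32-bit integer constant, emitting OpConstantNull for zero
// and OpConstantI otherwise. If no result type is supplied, a 32-bit integer
// type is created (or reused) on demand.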
Register
SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
                                           const SPIRVType *ResType) const {
  const SPIRVType *SpvI32Ty =
      ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
  Register NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
  MachineInstr *MI;
  MachineBasicBlock &BB = *I.getParent();
  if (Val == 0) {
    MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
             .addDef(NewReg)
             .addUse(GR.getSPIRVTypeID(SpvI32Ty));
  } else {
    MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
             .addDef(NewReg)
             .addUse(GR.getSPIRVTypeID(SpvI32Ty))
             .addImm(APInt(32, Val).getZExtValue());
  }
  constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
  return NewReg;
}

bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
  return selectCmp(ResVReg, ResType, CmpOp, I);
}

Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  return buildI32Constant(0, I, ResType);
}

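// Builds a constant of one (or all-ones when AllOnes is set) for the given
// type; for vector types the scalar constant is splatted across the elements
// with OpConstantComposite.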
Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
  APInt One = AllOnes ? APInt::getAllOnesValue(BitWidth)
                      : APInt::getOneBitSet(BitWidth, 0);
  Register OneReg = buildI32Constant(One.getZExtValue(), I, ResType);
  if (ResType->getOpcode() == SPIRV::OpTypeVector) {
    const unsigned NumEles = ResType->getOperand(2).getImm();
    Register OneVec = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    unsigned Opcode = SPIRV::OpConstantComposite;
    auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                   .addDef(OneVec)
                   .addUse(GR.getSPIRVTypeID(ResType));
    for (unsigned i = 0; i < NumEles; ++i)
      MIB.addUse(OneReg);
    constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
    return OneVec;
  }
  return OneReg;
}

bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I,
                                            bool IsSigned) const {
  // To extend a bool, we need to use OpSelect between constants.
  Register ZeroReg = buildZerosVal(ResType, I);
  Register OneReg = buildOnesVal(IsSigned, ResType, I);
  bool IsScalarBool =
      GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
  unsigned Opcode =
      IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .addUse(OneReg)
      .addUse(ZeroReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I, bool IsSigned,
                                          unsigned Opcode) const {
  Register SrcReg = I.getOperand(1).getReg();
  // We can convert bool value directly to float type without OpConvert*ToF,
  // however the translator generates OpSelect+OpConvert*ToF, so we do the same.
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
    unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
    SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
    if (ResType->getOpcode() == SPIRV::OpTypeVector) {
      const unsigned NumElts = ResType->getOperand(2).getImm();
      TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
    }
    SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    selectSelect(SrcReg, TmpType, I, false);
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
}

bool SPIRVInstructionSelector::selectExt(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I, bool IsSigned) const {
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool))
    return selectSelect(ResVReg, ResType, I, IsSigned);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}

bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
                                               Register ResVReg,
                                               const SPIRVType *IntTy,
                                               const SPIRVType *BoolTy,
                                               MachineInstr &I) const {
  // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero.
  Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
  unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
  Register Zero = buildZerosVal(IntTy, I);
  Register One = buildOnesVal(false, IntTy, I);
  MachineBasicBlock &BB = *I.getParent();
  BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(BitIntReg)
      .addUse(GR.getSPIRVTypeID(IntTy))
      .addUse(IntReg)
      .addUse(One)
      .constrainAllUses(TII, TRI, RBI);
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(BoolTy))
      .addUse(BitIntReg)
      .addUse(Zero)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) {
    Register IntReg = I.getOperand(1).getReg();
    const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
    return selectIntToBool(IntReg, ResVReg, ArgType, ResType, I);
  }
  bool IsSigned = GR.isScalarOrVectorSigned(ResType);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}

bool SPIRVInstructionSelector::selectConst(Register ResVReg,
                                           const SPIRVType *ResType,
                                           const APInt &Imm,
                                           MachineInstr &I) const {
  assert(ResType->getOpcode() != SPIRV::OpTypePointer || Imm.isNullValue());
  MachineBasicBlock &BB = *I.getParent();
  if (ResType->getOpcode() == SPIRV::OpTypePointer && Imm.isNullValue()) {
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .constrainAllUses(TII, TRI, RBI);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  // <=32-bit integers should be caught by the sdag pattern.
  assert(Imm.getBitWidth() > 32);
  addNumImm(Imm, MIB);
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  llvm_unreachable("Intrinsic selection not implemented");
}

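// Selects G_FRAME_INDEX into an OpVariable in the Function storage class,
// i.e. stack allocations become function-local SPIR-V variables.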
bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. We can use
  // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
  // first, so can generate an OpBranchConditional here. If there is no
  // G_BRCOND, we just use OpBranch for a regular unconditional branch.
  const MachineInstr *PrevI = I.getPrevNode();
  MachineBasicBlock &MBB = *I.getParent();
  if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
    return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
        .addUse(PrevI->getOperand(0).getReg())
        .addMBB(PrevI->getOperand(1).getMBB())
        .addMBB(I.getOperand(0).getMBB())
        .constrainAllUses(TII, TRI, RBI);
  }
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
      .addMBB(I.getOperand(0).getMBB())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. For an
  // explicit conditional branch with no fallthrough, we use both a G_BR and a
  // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
  // generate the OpBranchConditional in selectBranch above.
  //
  // If an OpBranchConditional has been generated, we simply return, as the
  // work is already done. If there is no OpBranchConditional, LLVM must be
  // relying on implicit fallthrough to the next basic block, so we need to
  // create an OpBranchConditional with an explicit "false" argument pointing
  // to the next basic block that LLVM would fall through to.
  const MachineInstr *NextI = I.getNextNode();
  // Check if this has already been successfully selected.
  if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
    return true;
  // Must be relying on implicit block fallthrough, so generate an
  // OpBranchConditional with the "next" basic block as the "false" target.
  MachineBasicBlock &MBB = *I.getParent();
  unsigned NextMBBNum = MBB.getNextNode()->getNumber();
  MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
      .addUse(I.getOperand(0).getReg())
      .addMBB(I.getOperand(1).getMBB())
      .addMBB(NextMBB)
      .constrainAllUses(TII, TRI, RBI);
}

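// Selects G_PHI into OpPhi, copying the (value, predecessor-block) operand
// pairs straight across.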
bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  const unsigned NumOps = I.getNumOperands();
  for (unsigned i = 1; i < NumOps; i += 2) {
    MIB.addUse(I.getOperand(i + 0).getReg());
    MIB.addMBB(I.getOperand(i + 1).getMBB());
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectGlobalValue(
    Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
  // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
  MachineIRBuilder MIRBuilder(I);
  const GlobalValue *GV = I.getOperand(1).getGlobal();
  SPIRVType *ResType = GR.getOrCreateSPIRVType(
      GV->getType(), MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);

  std::string GlobalIdent = GV->getGlobalIdentifier();
  // TODO: support @llvm.global.annotations.
  auto GlobalVar = cast<GlobalVariable>(GV);

  bool HasInit = GlobalVar->hasInitializer() &&
                 !isa<UndefValue>(GlobalVar->getInitializer());
  // Skip empty declaration for GVs with initializers till we get the decl with
  // passed initializer.
  if (HasInit && !Init)
    return true;

  unsigned AddrSpace = GV->getAddressSpace();
  SPIRV::StorageClass Storage = addressSpaceToStorageClass(AddrSpace);
  bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
                  Storage != SPIRV::StorageClass::Function;
  SPIRV::LinkageType LnkType =
      (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
          ? SPIRV::LinkageType::Import
          : SPIRV::LinkageType::Export;

  Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
                                        Storage, Init, GlobalVar->isConstant(),
                                        HasLnkTy, LnkType, MIRBuilder, true);
  return Reg.isValid();
}

namespace llvm {
InstructionSelector *
createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                               const SPIRVSubtarget &Subtarget,
                               const RegisterBankInfo &RBI) {
  return new SPIRVInstructionSelector(TM, Subtarget, RBI);
}
} // namespace llvm