forked from OSchip/llvm-project
3850 lines
130 KiB
C++
3850 lines
130 KiB
C++
//===- AMDGPULegalizerInfo.cpp -----------------------------------*- C++ -*-==//
|
|
//
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
|
//
|
|
//===----------------------------------------------------------------------===//
|
|
/// \file
|
|
/// This file implements the targeting of the Machinelegalizer class for
|
|
/// AMDGPU.
|
|
/// \todo This should be generated by TableGen.
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
#if defined(_MSC_VER) || defined(__MINGW32__)
|
|
// According to Microsoft, one must set _USE_MATH_DEFINES in order to get M_PI
|
|
// from the Visual C++ cmath / math.h headers:
|
|
// https://docs.microsoft.com/en-us/cpp/c-runtime-library/math-constants?view=vs-2019
|
|
#define _USE_MATH_DEFINES
|
|
#endif
|
|
|
|
#include "AMDGPULegalizerInfo.h"
|
|
|
|
#include "AMDGPU.h"
|
|
#include "AMDGPUGlobalISelUtils.h"
|
|
#include "AMDGPUTargetMachine.h"
|
|
#include "SIMachineFunctionInfo.h"
|
|
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
|
|
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
|
|
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
|
|
#include "llvm/CodeGen/TargetOpcodes.h"
|
|
#include "llvm/CodeGen/ValueTypes.h"
|
|
#include "llvm/IR/DerivedTypes.h"
|
|
#include "llvm/IR/DiagnosticInfo.h"
|
|
#include "llvm/IR/Type.h"
|
|
#include "llvm/Support/Debug.h"
|
|
|
|
#define DEBUG_TYPE "amdgpu-legalinfo"
|
|
|
|
using namespace llvm;
|
|
using namespace LegalizeActions;
|
|
using namespace LegalizeMutations;
|
|
using namespace LegalityPredicates;
|
|
using namespace MIPatternMatch;
|
|
|
|
// Round the number of elements to the next power of two elements
|
|
static LLT getPow2VectorType(LLT Ty) {
|
|
unsigned NElts = Ty.getNumElements();
|
|
unsigned Pow2NElts = 1 << Log2_32_Ceil(NElts);
|
|
return Ty.changeNumElements(Pow2NElts);
|
|
}
|
|
|
|
// Round the number of bits to the next power of two bits
|
|
static LLT getPow2ScalarType(LLT Ty) {
|
|
unsigned Bits = Ty.getSizeInBits();
|
|
unsigned Pow2Bits = 1 << Log2_32_Ceil(Bits);
|
|
return LLT::scalar(Pow2Bits);
|
|
}
|
|
|
|
static LegalityPredicate isMultiple32(unsigned TypeIdx,
|
|
unsigned MaxSize = 1024) {
|
|
return [=](const LegalityQuery &Query) {
|
|
const LLT Ty = Query.Types[TypeIdx];
|
|
const LLT EltTy = Ty.getScalarType();
|
|
return Ty.getSizeInBits() <= MaxSize && EltTy.getSizeInBits() % 32 == 0;
|
|
};
|
|
}
|
|
|
|
static LegalityPredicate sizeIs(unsigned TypeIdx, unsigned Size) {
|
|
return [=](const LegalityQuery &Query) {
|
|
return Query.Types[TypeIdx].getSizeInBits() == Size;
|
|
};
|
|
}
|
|
|
|
static LegalityPredicate isSmallOddVector(unsigned TypeIdx) {
|
|
return [=](const LegalityQuery &Query) {
|
|
const LLT Ty = Query.Types[TypeIdx];
|
|
return Ty.isVector() &&
|
|
Ty.getNumElements() % 2 != 0 &&
|
|
Ty.getElementType().getSizeInBits() < 32 &&
|
|
Ty.getSizeInBits() % 32 != 0;
|
|
};
|
|
}
|
|
|
|
static LegalityPredicate isWideVec16(unsigned TypeIdx) {
|
|
return [=](const LegalityQuery &Query) {
|
|
const LLT Ty = Query.Types[TypeIdx];
|
|
const LLT EltTy = Ty.getScalarType();
|
|
return EltTy.getSizeInBits() == 16 && Ty.getNumElements() > 2;
|
|
};
|
|
}
|
|
|
|
static LegalizeMutation oneMoreElement(unsigned TypeIdx) {
|
|
return [=](const LegalityQuery &Query) {
|
|
const LLT Ty = Query.Types[TypeIdx];
|
|
const LLT EltTy = Ty.getElementType();
|
|
return std::make_pair(TypeIdx, LLT::vector(Ty.getNumElements() + 1, EltTy));
|
|
};
|
|
}
|
|
|
|
static LegalizeMutation fewerEltsToSize64Vector(unsigned TypeIdx) {
|
|
return [=](const LegalityQuery &Query) {
|
|
const LLT Ty = Query.Types[TypeIdx];
|
|
const LLT EltTy = Ty.getElementType();
|
|
unsigned Size = Ty.getSizeInBits();
|
|
unsigned Pieces = (Size + 63) / 64;
|
|
unsigned NewNumElts = (Ty.getNumElements() + 1) / Pieces;
|
|
return std::make_pair(TypeIdx, LLT::scalarOrVector(NewNumElts, EltTy));
|
|
};
|
|
}
|
|
|
|
// Increase the number of vector elements to reach the next multiple of 32-bit
|
|
// type.
|
|
static LegalizeMutation moreEltsToNext32Bit(unsigned TypeIdx) {
|
|
return [=](const LegalityQuery &Query) {
|
|
const LLT Ty = Query.Types[TypeIdx];
|
|
|
|
const LLT EltTy = Ty.getElementType();
|
|
const int Size = Ty.getSizeInBits();
|
|
const int EltSize = EltTy.getSizeInBits();
|
|
const int NextMul32 = (Size + 31) / 32;
|
|
|
|
assert(EltSize < 32);
|
|
|
|
const int NewNumElts = (32 * NextMul32 + EltSize - 1) / EltSize;
|
|
return std::make_pair(TypeIdx, LLT::vector(NewNumElts, EltTy));
|
|
};
|
|
}
|
|
|
|
static LegalityPredicate vectorSmallerThan(unsigned TypeIdx, unsigned Size) {
|
|
return [=](const LegalityQuery &Query) {
|
|
const LLT QueryTy = Query.Types[TypeIdx];
|
|
return QueryTy.isVector() && QueryTy.getSizeInBits() < Size;
|
|
};
|
|
}
|
|
|
|
static LegalityPredicate vectorWiderThan(unsigned TypeIdx, unsigned Size) {
|
|
return [=](const LegalityQuery &Query) {
|
|
const LLT QueryTy = Query.Types[TypeIdx];
|
|
return QueryTy.isVector() && QueryTy.getSizeInBits() > Size;
|
|
};
|
|
}
|
|
|
|
static LegalityPredicate numElementsNotEven(unsigned TypeIdx) {
|
|
return [=](const LegalityQuery &Query) {
|
|
const LLT QueryTy = Query.Types[TypeIdx];
|
|
return QueryTy.isVector() && QueryTy.getNumElements() % 2 != 0;
|
|
};
|
|
}
|
|
|
|
// Any combination of 32 or 64-bit elements up to 1024 bits, and multiples of
|
|
// v2s16.
|
|
static LegalityPredicate isRegisterType(unsigned TypeIdx) {
|
|
return [=](const LegalityQuery &Query) {
|
|
const LLT Ty = Query.Types[TypeIdx];
|
|
if (Ty.isVector()) {
|
|
const int EltSize = Ty.getElementType().getSizeInBits();
|
|
return EltSize == 32 || EltSize == 64 ||
|
|
(EltSize == 16 && Ty.getNumElements() % 2 == 0) ||
|
|
EltSize == 128 || EltSize == 256;
|
|
}
|
|
|
|
return Ty.getSizeInBits() % 32 == 0 && Ty.getSizeInBits() <= 1024;
|
|
};
|
|
}
|
|
|
|
static LegalityPredicate elementTypeIs(unsigned TypeIdx, LLT Type) {
|
|
return [=](const LegalityQuery &Query) {
|
|
const LLT QueryTy = Query.Types[TypeIdx];
|
|
return QueryTy.isVector() && QueryTy.getElementType() == Type;
|
|
};
|
|
}
|
|
|
|
static LegalityPredicate elementTypeIsLegal(unsigned TypeIdx) {
|
|
return [=](const LegalityQuery &Query) {
|
|
const LLT QueryTy = Query.Types[TypeIdx];
|
|
if (!QueryTy.isVector())
|
|
return false;
|
|
const LLT EltTy = QueryTy.getElementType();
|
|
return EltTy == LLT::scalar(16) || EltTy.getSizeInBits() >= 32;
|
|
};
|
|
}
|
|
|
|
static LegalityPredicate isWideScalarTruncStore(unsigned TypeIdx) {
|
|
return [=](const LegalityQuery &Query) {
|
|
const LLT Ty = Query.Types[TypeIdx];
|
|
return !Ty.isVector() && Ty.getSizeInBits() > 32 &&
|
|
Query.MMODescrs[0].SizeInBits < Ty.getSizeInBits();
|
|
};
|
|
}
|
|
|
|
static LegalityPredicate smallerThan(unsigned TypeIdx0, unsigned TypeIdx1) {
|
|
return [=](const LegalityQuery &Query) {
|
|
return Query.Types[TypeIdx0].getSizeInBits() <
|
|
Query.Types[TypeIdx1].getSizeInBits();
|
|
};
|
|
}
|
|
|
|
static LegalityPredicate greaterThan(unsigned TypeIdx0, unsigned TypeIdx1) {
|
|
return [=](const LegalityQuery &Query) {
|
|
return Query.Types[TypeIdx0].getSizeInBits() >
|
|
Query.Types[TypeIdx1].getSizeInBits();
|
|
};
|
|
}
|
|
|
|
AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
|
|
const GCNTargetMachine &TM)
|
|
: ST(ST_) {
|
|
using namespace TargetOpcode;
|
|
|
|
auto GetAddrSpacePtr = [&TM](unsigned AS) {
|
|
return LLT::pointer(AS, TM.getPointerSizeInBits(AS));
|
|
};
|
|
|
|
const LLT S1 = LLT::scalar(1);
|
|
const LLT S16 = LLT::scalar(16);
|
|
const LLT S32 = LLT::scalar(32);
|
|
const LLT S64 = LLT::scalar(64);
|
|
const LLT S128 = LLT::scalar(128);
|
|
const LLT S256 = LLT::scalar(256);
|
|
const LLT S1024 = LLT::scalar(1024);
|
|
|
|
const LLT V2S16 = LLT::vector(2, 16);
|
|
const LLT V4S16 = LLT::vector(4, 16);
|
|
|
|
const LLT V2S32 = LLT::vector(2, 32);
|
|
const LLT V3S32 = LLT::vector(3, 32);
|
|
const LLT V4S32 = LLT::vector(4, 32);
|
|
const LLT V5S32 = LLT::vector(5, 32);
|
|
const LLT V6S32 = LLT::vector(6, 32);
|
|
const LLT V7S32 = LLT::vector(7, 32);
|
|
const LLT V8S32 = LLT::vector(8, 32);
|
|
const LLT V9S32 = LLT::vector(9, 32);
|
|
const LLT V10S32 = LLT::vector(10, 32);
|
|
const LLT V11S32 = LLT::vector(11, 32);
|
|
const LLT V12S32 = LLT::vector(12, 32);
|
|
const LLT V13S32 = LLT::vector(13, 32);
|
|
const LLT V14S32 = LLT::vector(14, 32);
|
|
const LLT V15S32 = LLT::vector(15, 32);
|
|
const LLT V16S32 = LLT::vector(16, 32);
|
|
const LLT V32S32 = LLT::vector(32, 32);
|
|
|
|
const LLT V2S64 = LLT::vector(2, 64);
|
|
const LLT V3S64 = LLT::vector(3, 64);
|
|
const LLT V4S64 = LLT::vector(4, 64);
|
|
const LLT V5S64 = LLT::vector(5, 64);
|
|
const LLT V6S64 = LLT::vector(6, 64);
|
|
const LLT V7S64 = LLT::vector(7, 64);
|
|
const LLT V8S64 = LLT::vector(8, 64);
|
|
const LLT V16S64 = LLT::vector(16, 64);
|
|
|
|
std::initializer_list<LLT> AllS32Vectors =
|
|
{V2S32, V3S32, V4S32, V5S32, V6S32, V7S32, V8S32,
|
|
V9S32, V10S32, V11S32, V12S32, V13S32, V14S32, V15S32, V16S32, V32S32};
|
|
std::initializer_list<LLT> AllS64Vectors =
|
|
{V2S64, V3S64, V4S64, V5S64, V6S64, V7S64, V8S64, V16S64};
|
|
|
|
const LLT GlobalPtr = GetAddrSpacePtr(AMDGPUAS::GLOBAL_ADDRESS);
|
|
const LLT ConstantPtr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS);
|
|
const LLT Constant32Ptr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS_32BIT);
|
|
const LLT LocalPtr = GetAddrSpacePtr(AMDGPUAS::LOCAL_ADDRESS);
|
|
const LLT RegionPtr = GetAddrSpacePtr(AMDGPUAS::REGION_ADDRESS);
|
|
const LLT FlatPtr = GetAddrSpacePtr(AMDGPUAS::FLAT_ADDRESS);
|
|
const LLT PrivatePtr = GetAddrSpacePtr(AMDGPUAS::PRIVATE_ADDRESS);
|
|
|
|
const LLT CodePtr = FlatPtr;
|
|
|
|
const std::initializer_list<LLT> AddrSpaces64 = {
|
|
GlobalPtr, ConstantPtr, FlatPtr
|
|
};
|
|
|
|
const std::initializer_list<LLT> AddrSpaces32 = {
|
|
LocalPtr, PrivatePtr, Constant32Ptr, RegionPtr
|
|
};
|
|
|
|
const std::initializer_list<LLT> FPTypesBase = {
|
|
S32, S64
|
|
};
|
|
|
|
const std::initializer_list<LLT> FPTypes16 = {
|
|
S32, S64, S16
|
|
};
|
|
|
|
const std::initializer_list<LLT> FPTypesPK16 = {
|
|
S32, S64, S16, V2S16
|
|
};
|
|
|
|
const LLT MinScalarFPTy = ST.has16BitInsts() ? S16 : S32;
|
|
|
|
setAction({G_BRCOND, S1}, Legal); // VCC branches
|
|
setAction({G_BRCOND, S32}, Legal); // SCC branches
|
|
|
|
// TODO: All multiples of 32, vectors of pointers, all v2s16 pairs, more
|
|
// elements for v3s16
|
|
getActionDefinitionsBuilder(G_PHI)
|
|
.legalFor({S32, S64, V2S16, V4S16, S1, S128, S256})
|
|
.legalFor(AllS32Vectors)
|
|
.legalFor(AllS64Vectors)
|
|
.legalFor(AddrSpaces64)
|
|
.legalFor(AddrSpaces32)
|
|
.clampScalar(0, S32, S256)
|
|
.widenScalarToNextPow2(0, 32)
|
|
.clampMaxNumElements(0, S32, 16)
|
|
.moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
|
|
.legalIf(isPointer(0));
|
|
|
|
if (ST.hasVOP3PInsts()) {
|
|
getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
|
|
.legalFor({S32, S16, V2S16})
|
|
.clampScalar(0, S16, S32)
|
|
.clampMaxNumElements(0, S16, 2)
|
|
.scalarize(0)
|
|
.widenScalarToNextPow2(0, 32);
|
|
} else if (ST.has16BitInsts()) {
|
|
getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
|
|
.legalFor({S32, S16})
|
|
.clampScalar(0, S16, S32)
|
|
.scalarize(0)
|
|
.widenScalarToNextPow2(0, 32);
|
|
} else {
|
|
getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
|
|
.legalFor({S32})
|
|
.clampScalar(0, S32, S32)
|
|
.scalarize(0);
|
|
}
|
|
|
|
// FIXME: Not really legal. Placeholder for custom lowering.
|
|
getActionDefinitionsBuilder({G_SDIV, G_UDIV, G_SREM, G_UREM})
|
|
.customFor({S32, S64})
|
|
.clampScalar(0, S32, S64)
|
|
.widenScalarToNextPow2(0, 32)
|
|
.scalarize(0);
|
|
|
|
getActionDefinitionsBuilder({G_UMULH, G_SMULH})
|
|
.legalFor({S32})
|
|
.clampScalar(0, S32, S32)
|
|
.scalarize(0);
|
|
|
|
// Report legal for any types we can handle anywhere. For the cases only legal
|
|
// on the SALU, RegBankSelect will be able to re-legalize.
|
|
getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
|
|
.legalFor({S32, S1, S64, V2S32, S16, V2S16, V4S16})
|
|
.clampScalar(0, S32, S64)
|
|
.moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
|
|
.fewerElementsIf(vectorWiderThan(0, 64), fewerEltsToSize64Vector(0))
|
|
.widenScalarToNextPow2(0)
|
|
.scalarize(0);
|
|
|
|
getActionDefinitionsBuilder({G_UADDO, G_USUBO,
|
|
G_UADDE, G_SADDE, G_USUBE, G_SSUBE})
|
|
.legalFor({{S32, S1}, {S32, S32}})
|
|
.minScalar(0, S32)
|
|
// TODO: .scalarize(0)
|
|
.lower();
|
|
|
|
getActionDefinitionsBuilder(G_BITCAST)
|
|
// Don't worry about the size constraint.
|
|
.legalIf(all(isRegisterType(0), isRegisterType(1)))
|
|
.lower();
|
|
|
|
|
|
getActionDefinitionsBuilder(G_CONSTANT)
|
|
.legalFor({S1, S32, S64, S16, GlobalPtr,
|
|
LocalPtr, ConstantPtr, PrivatePtr, FlatPtr })
|
|
.clampScalar(0, S32, S64)
|
|
.widenScalarToNextPow2(0)
|
|
.legalIf(isPointer(0));
|
|
|
|
getActionDefinitionsBuilder(G_FCONSTANT)
|
|
.legalFor({S32, S64, S16})
|
|
.clampScalar(0, S16, S64);
|
|
|
|
getActionDefinitionsBuilder(G_IMPLICIT_DEF)
|
|
.legalFor({S1, S32, S64, S16, V2S32, V4S32, V2S16, V4S16, GlobalPtr,
|
|
ConstantPtr, LocalPtr, FlatPtr, PrivatePtr})
|
|
.moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
|
|
.clampScalarOrElt(0, S32, S1024)
|
|
.legalIf(isMultiple32(0))
|
|
.widenScalarToNextPow2(0, 32)
|
|
.clampMaxNumElements(0, S32, 16);
|
|
|
|
setAction({G_FRAME_INDEX, PrivatePtr}, Legal);
|
|
getActionDefinitionsBuilder(G_GLOBAL_VALUE)
|
|
.unsupportedFor({PrivatePtr})
|
|
.custom();
|
|
setAction({G_BLOCK_ADDR, CodePtr}, Legal);
|
|
|
|
auto &FPOpActions = getActionDefinitionsBuilder(
|
|
{ G_FADD, G_FMUL, G_FMA, G_FCANONICALIZE})
|
|
.legalFor({S32, S64});
|
|
auto &TrigActions = getActionDefinitionsBuilder({G_FSIN, G_FCOS})
|
|
.customFor({S32, S64});
|
|
auto &FDIVActions = getActionDefinitionsBuilder(G_FDIV)
|
|
.customFor({S32, S64});
|
|
|
|
if (ST.has16BitInsts()) {
|
|
if (ST.hasVOP3PInsts())
|
|
FPOpActions.legalFor({S16, V2S16});
|
|
else
|
|
FPOpActions.legalFor({S16});
|
|
|
|
TrigActions.customFor({S16});
|
|
FDIVActions.customFor({S16});
|
|
}
|
|
|
|
auto &MinNumMaxNum = getActionDefinitionsBuilder({
|
|
G_FMINNUM, G_FMAXNUM, G_FMINNUM_IEEE, G_FMAXNUM_IEEE});
|
|
|
|
if (ST.hasVOP3PInsts()) {
|
|
MinNumMaxNum.customFor(FPTypesPK16)
|
|
.moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
|
|
.clampMaxNumElements(0, S16, 2)
|
|
.clampScalar(0, S16, S64)
|
|
.scalarize(0);
|
|
} else if (ST.has16BitInsts()) {
|
|
MinNumMaxNum.customFor(FPTypes16)
|
|
.clampScalar(0, S16, S64)
|
|
.scalarize(0);
|
|
} else {
|
|
MinNumMaxNum.customFor(FPTypesBase)
|
|
.clampScalar(0, S32, S64)
|
|
.scalarize(0);
|
|
}
|
|
|
|
if (ST.hasVOP3PInsts())
|
|
FPOpActions.clampMaxNumElements(0, S16, 2);
|
|
|
|
FPOpActions
|
|
.scalarize(0)
|
|
.clampScalar(0, ST.has16BitInsts() ? S16 : S32, S64);
|
|
|
|
TrigActions
|
|
.scalarize(0)
|
|
.clampScalar(0, ST.has16BitInsts() ? S16 : S32, S64);
|
|
|
|
FDIVActions
|
|
.scalarize(0)
|
|
.clampScalar(0, ST.has16BitInsts() ? S16 : S32, S64);
|
|
|
|
getActionDefinitionsBuilder({G_FNEG, G_FABS})
|
|
.legalFor(FPTypesPK16)
|
|
.clampMaxNumElements(0, S16, 2)
|
|
.scalarize(0)
|
|
.clampScalar(0, S16, S64);
|
|
|
|
if (ST.has16BitInsts()) {
|
|
getActionDefinitionsBuilder({G_FSQRT, G_FFLOOR})
|
|
.legalFor({S32, S64, S16})
|
|
.scalarize(0)
|
|
.clampScalar(0, S16, S64);
|
|
} else {
|
|
getActionDefinitionsBuilder(G_FSQRT)
|
|
.legalFor({S32, S64})
|
|
.scalarize(0)
|
|
.clampScalar(0, S32, S64);
|
|
|
|
if (ST.hasFractBug()) {
|
|
getActionDefinitionsBuilder(G_FFLOOR)
|
|
.customFor({S64})
|
|
.legalFor({S32, S64})
|
|
.scalarize(0)
|
|
.clampScalar(0, S32, S64);
|
|
} else {
|
|
getActionDefinitionsBuilder(G_FFLOOR)
|
|
.legalFor({S32, S64})
|
|
.scalarize(0)
|
|
.clampScalar(0, S32, S64);
|
|
}
|
|
}
|
|
|
|
getActionDefinitionsBuilder(G_FPTRUNC)
|
|
.legalFor({{S32, S64}, {S16, S32}})
|
|
.scalarize(0)
|
|
.lower();
|
|
|
|
getActionDefinitionsBuilder(G_FPEXT)
|
|
.legalFor({{S64, S32}, {S32, S16}})
|
|
.lowerFor({{S64, S16}}) // FIXME: Implement
|
|
.scalarize(0);
|
|
|
|
getActionDefinitionsBuilder(G_FSUB)
|
|
// Use actual fsub instruction
|
|
.legalFor({S32})
|
|
// Must use fadd + fneg
|
|
.lowerFor({S64, S16, V2S16})
|
|
.scalarize(0)
|
|
.clampScalar(0, S32, S64);
|
|
|
|
// Whether this is legal depends on the floating point mode for the function.
|
|
auto &FMad = getActionDefinitionsBuilder(G_FMAD);
|
|
if (ST.hasMadF16())
|
|
FMad.customFor({S32, S16});
|
|
else
|
|
FMad.customFor({S32});
|
|
FMad.scalarize(0)
|
|
.lower();
|
|
|
|
// TODO: Do we need to clamp maximum bitwidth?
|
|
getActionDefinitionsBuilder(G_TRUNC)
|
|
.legalIf(isScalar(0))
|
|
.legalFor({{V2S16, V2S32}})
|
|
.clampMaxNumElements(0, S16, 2)
|
|
// Avoid scalarizing in cases that should be truly illegal. In unresolvable
|
|
// situations (like an invalid implicit use), we don't want to infinite loop
|
|
// in the legalizer.
|
|
.fewerElementsIf(elementTypeIsLegal(0), LegalizeMutations::scalarize(0))
|
|
.alwaysLegal();
|
|
|
|
getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
|
|
.legalFor({{S64, S32}, {S32, S16}, {S64, S16},
|
|
{S32, S1}, {S64, S1}, {S16, S1}})
|
|
.scalarize(0)
|
|
.clampScalar(0, S32, S64)
|
|
.widenScalarToNextPow2(1, 32);
|
|
|
|
// TODO: Split s1->s64 during regbankselect for VALU.
|
|
auto &IToFP = getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
|
|
.legalFor({{S32, S32}, {S64, S32}, {S16, S32}})
|
|
.lowerFor({{S32, S64}})
|
|
.lowerIf(typeIs(1, S1))
|
|
.customFor({{S64, S64}});
|
|
if (ST.has16BitInsts())
|
|
IToFP.legalFor({{S16, S16}});
|
|
IToFP.clampScalar(1, S32, S64)
|
|
.scalarize(0)
|
|
.widenScalarToNextPow2(1);
|
|
|
|
auto &FPToI = getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
|
|
.legalFor({{S32, S32}, {S32, S64}, {S32, S16}})
|
|
.customFor({{S64, S64}});
|
|
if (ST.has16BitInsts())
|
|
FPToI.legalFor({{S16, S16}});
|
|
else
|
|
FPToI.minScalar(1, S32);
|
|
|
|
FPToI.minScalar(0, S32)
|
|
.scalarize(0)
|
|
.lower();
|
|
|
|
getActionDefinitionsBuilder(G_INTRINSIC_ROUND)
|
|
.scalarize(0)
|
|
.lower();
|
|
|
|
if (ST.has16BitInsts()) {
|
|
getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_FCEIL, G_FRINT})
|
|
.legalFor({S16, S32, S64})
|
|
.clampScalar(0, S16, S64)
|
|
.scalarize(0);
|
|
} else if (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
|
|
getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_FCEIL, G_FRINT})
|
|
.legalFor({S32, S64})
|
|
.clampScalar(0, S32, S64)
|
|
.scalarize(0);
|
|
} else {
|
|
getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_FCEIL, G_FRINT})
|
|
.legalFor({S32})
|
|
.customFor({S64})
|
|
.clampScalar(0, S32, S64)
|
|
.scalarize(0);
|
|
}
|
|
|
|
getActionDefinitionsBuilder({G_PTR_ADD, G_PTR_MASK})
|
|
.scalarize(0)
|
|
.alwaysLegal();
|
|
|
|
auto &CmpBuilder =
|
|
getActionDefinitionsBuilder(G_ICMP)
|
|
// The compare output type differs based on the register bank of the output,
|
|
// so make both s1 and s32 legal.
|
|
//
|
|
// Scalar compares producing output in scc will be promoted to s32, as that
|
|
// is the allocatable register type that will be needed for the copy from
|
|
// scc. This will be promoted during RegBankSelect, and we assume something
|
|
// before that won't try to use s32 result types.
|
|
//
|
|
// Vector compares producing an output in vcc/SGPR will use s1 in VCC reg
|
|
// bank.
|
|
.legalForCartesianProduct(
|
|
{S1}, {S32, S64, GlobalPtr, LocalPtr, ConstantPtr, PrivatePtr, FlatPtr})
|
|
.legalForCartesianProduct(
|
|
{S32}, {S32, S64, GlobalPtr, LocalPtr, ConstantPtr, PrivatePtr, FlatPtr});
|
|
if (ST.has16BitInsts()) {
|
|
CmpBuilder.legalFor({{S1, S16}});
|
|
}
|
|
|
|
CmpBuilder
|
|
.widenScalarToNextPow2(1)
|
|
.clampScalar(1, S32, S64)
|
|
.scalarize(0)
|
|
.legalIf(all(typeInSet(0, {S1, S32}), isPointer(1)));
|
|
|
|
getActionDefinitionsBuilder(G_FCMP)
|
|
.legalForCartesianProduct({S1}, ST.has16BitInsts() ? FPTypes16 : FPTypesBase)
|
|
.widenScalarToNextPow2(1)
|
|
.clampScalar(1, S32, S64)
|
|
.scalarize(0);
|
|
|
|
// FIXME: fpow has a selection pattern that should move to custom lowering.
|
|
auto &Exp2Ops = getActionDefinitionsBuilder({G_FEXP2, G_FLOG2});
|
|
if (ST.has16BitInsts())
|
|
Exp2Ops.legalFor({S32, S16});
|
|
else
|
|
Exp2Ops.legalFor({S32});
|
|
Exp2Ops.clampScalar(0, MinScalarFPTy, S32);
|
|
Exp2Ops.scalarize(0);
|
|
|
|
auto &ExpOps = getActionDefinitionsBuilder({G_FEXP, G_FLOG, G_FLOG10, G_FPOW});
|
|
if (ST.has16BitInsts())
|
|
ExpOps.customFor({{S32}, {S16}});
|
|
else
|
|
ExpOps.customFor({S32});
|
|
ExpOps.clampScalar(0, MinScalarFPTy, S32)
|
|
.scalarize(0);
|
|
|
|
// The 64-bit versions produce 32-bit results, but only on the SALU.
|
|
getActionDefinitionsBuilder(G_CTPOP)
|
|
.legalFor({{S32, S32}, {S32, S64}})
|
|
.clampScalar(0, S32, S32)
|
|
.clampScalar(1, S32, S64)
|
|
.scalarize(0)
|
|
.widenScalarToNextPow2(0, 32)
|
|
.widenScalarToNextPow2(1, 32);
|
|
|
|
// The hardware instructions return a different result on 0 than the generic
|
|
// instructions expect. The hardware produces -1, but these produce the
|
|
// bitwidth.
|
|
getActionDefinitionsBuilder({G_CTLZ, G_CTTZ})
|
|
.scalarize(0)
|
|
.clampScalar(0, S32, S32)
|
|
.clampScalar(1, S32, S64)
|
|
.widenScalarToNextPow2(0, 32)
|
|
.widenScalarToNextPow2(1, 32)
|
|
.lower();
|
|
|
|
// The 64-bit versions produce 32-bit results, but only on the SALU.
|
|
getActionDefinitionsBuilder({G_CTLZ_ZERO_UNDEF, G_CTTZ_ZERO_UNDEF})
|
|
.legalFor({{S32, S32}, {S32, S64}})
|
|
.clampScalar(0, S32, S32)
|
|
.clampScalar(1, S32, S64)
|
|
.scalarize(0)
|
|
.widenScalarToNextPow2(0, 32)
|
|
.widenScalarToNextPow2(1, 32);
|
|
|
|
getActionDefinitionsBuilder(G_BITREVERSE)
|
|
.legalFor({S32})
|
|
.clampScalar(0, S32, S32)
|
|
.scalarize(0);
|
|
|
|
if (ST.has16BitInsts()) {
|
|
getActionDefinitionsBuilder(G_BSWAP)
|
|
.legalFor({S16, S32, V2S16})
|
|
.clampMaxNumElements(0, S16, 2)
|
|
// FIXME: Fixing non-power-of-2 before clamp is workaround for
|
|
// narrowScalar limitation.
|
|
.widenScalarToNextPow2(0)
|
|
.clampScalar(0, S16, S32)
|
|
.scalarize(0);
|
|
|
|
if (ST.hasVOP3PInsts()) {
|
|
getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
|
|
.legalFor({S32, S16, V2S16})
|
|
.moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
|
|
.clampMaxNumElements(0, S16, 2)
|
|
.minScalar(0, S16)
|
|
.widenScalarToNextPow2(0)
|
|
.scalarize(0)
|
|
.lower();
|
|
} else {
|
|
getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
|
|
.legalFor({S32, S16})
|
|
.widenScalarToNextPow2(0)
|
|
.minScalar(0, S16)
|
|
.scalarize(0)
|
|
.lower();
|
|
}
|
|
} else {
|
|
// TODO: Should have same legality without v_perm_b32
|
|
getActionDefinitionsBuilder(G_BSWAP)
|
|
.legalFor({S32})
|
|
.lowerIf(narrowerThan(0, 32))
|
|
// FIXME: Fixing non-power-of-2 before clamp is workaround for
|
|
// narrowScalar limitation.
|
|
.widenScalarToNextPow2(0)
|
|
.maxScalar(0, S32)
|
|
.scalarize(0)
|
|
.lower();
|
|
|
|
getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
|
|
.legalFor({S32})
|
|
.minScalar(0, S32)
|
|
.widenScalarToNextPow2(0)
|
|
.scalarize(0)
|
|
.lower();
|
|
}
|
|
|
|
getActionDefinitionsBuilder(G_INTTOPTR)
|
|
// List the common cases
|
|
.legalForCartesianProduct(AddrSpaces64, {S64})
|
|
.legalForCartesianProduct(AddrSpaces32, {S32})
|
|
.scalarize(0)
|
|
// Accept any address space as long as the size matches
|
|
.legalIf(sameSize(0, 1))
|
|
.widenScalarIf(smallerThan(1, 0),
|
|
[](const LegalityQuery &Query) {
|
|
return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
|
|
})
|
|
.narrowScalarIf(greaterThan(1, 0),
|
|
[](const LegalityQuery &Query) {
|
|
return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
|
|
});
|
|
|
|
getActionDefinitionsBuilder(G_PTRTOINT)
|
|
// List the common cases
|
|
.legalForCartesianProduct(AddrSpaces64, {S64})
|
|
.legalForCartesianProduct(AddrSpaces32, {S32})
|
|
.scalarize(0)
|
|
// Accept any address space as long as the size matches
|
|
.legalIf(sameSize(0, 1))
|
|
.widenScalarIf(smallerThan(0, 1),
|
|
[](const LegalityQuery &Query) {
|
|
return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
|
|
})
|
|
.narrowScalarIf(
|
|
greaterThan(0, 1),
|
|
[](const LegalityQuery &Query) {
|
|
return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
|
|
});
|
|
|
|
getActionDefinitionsBuilder(G_ADDRSPACE_CAST)
|
|
.scalarize(0)
|
|
.custom();
|
|
|
|
// TODO: Should load to s16 be legal? Most loads extend to 32-bits, but we
|
|
// handle some operations by just promoting the register during
|
|
// selection. There are also d16 loads on GFX9+ which preserve the high bits.
|
|
auto maxSizeForAddrSpace = [this](unsigned AS, bool IsLoad) -> unsigned {
|
|
switch (AS) {
|
|
// FIXME: Private element size.
|
|
case AMDGPUAS::PRIVATE_ADDRESS:
|
|
return 32;
|
|
// FIXME: Check subtarget
|
|
case AMDGPUAS::LOCAL_ADDRESS:
|
|
return ST.useDS128() ? 128 : 64;
|
|
|
|
// Treat constant and global as identical. SMRD loads are sometimes usable
|
|
// for global loads (ideally constant address space should be eliminated)
|
|
// depending on the context. Legality cannot be context dependent, but
|
|
// RegBankSelect can split the load as necessary depending on the pointer
|
|
// register bank/uniformity and if the memory is invariant or not written in
|
|
// a kernel.
|
|
case AMDGPUAS::CONSTANT_ADDRESS:
|
|
case AMDGPUAS::GLOBAL_ADDRESS:
|
|
return IsLoad ? 512 : 128;
|
|
default:
|
|
return 128;
|
|
}
|
|
};
|
|
|
|
const auto needToSplitMemOp = [=](const LegalityQuery &Query,
|
|
bool IsLoad) -> bool {
|
|
const LLT DstTy = Query.Types[0];
|
|
|
|
// Split vector extloads.
|
|
unsigned MemSize = Query.MMODescrs[0].SizeInBits;
|
|
unsigned Align = Query.MMODescrs[0].AlignInBits;
|
|
|
|
if (MemSize < DstTy.getSizeInBits())
|
|
MemSize = std::max(MemSize, Align);
|
|
|
|
if (DstTy.isVector() && DstTy.getSizeInBits() > MemSize)
|
|
return true;
|
|
|
|
const LLT PtrTy = Query.Types[1];
|
|
unsigned AS = PtrTy.getAddressSpace();
|
|
if (MemSize > maxSizeForAddrSpace(AS, IsLoad))
|
|
return true;
|
|
|
|
// Catch weird sized loads that don't evenly divide into the access sizes
|
|
// TODO: May be able to widen depending on alignment etc.
|
|
unsigned NumRegs = (MemSize + 31) / 32;
|
|
if (NumRegs == 3) {
|
|
if (!ST.hasDwordx3LoadStores())
|
|
return true;
|
|
} else {
|
|
// If the alignment allows, these should have been widened.
|
|
if (!isPowerOf2_32(NumRegs))
|
|
return true;
|
|
}
|
|
|
|
if (Align < MemSize) {
|
|
const SITargetLowering *TLI = ST.getTargetLowering();
|
|
return !TLI->allowsMisalignedMemoryAccessesImpl(MemSize, AS, Align / 8);
|
|
}
|
|
|
|
return false;
|
|
};
|
|
|
|
const auto shouldWidenLoadResult = [=](const LegalityQuery &Query) -> bool {
|
|
unsigned Size = Query.Types[0].getSizeInBits();
|
|
if (isPowerOf2_32(Size))
|
|
return false;
|
|
|
|
if (Size == 96 && ST.hasDwordx3LoadStores())
|
|
return false;
|
|
|
|
unsigned AddrSpace = Query.Types[1].getAddressSpace();
|
|
if (Size >= maxSizeForAddrSpace(AddrSpace, true))
|
|
return false;
|
|
|
|
unsigned Align = Query.MMODescrs[0].AlignInBits;
|
|
unsigned RoundedSize = NextPowerOf2(Size);
|
|
return (Align >= RoundedSize);
|
|
};
|
|
|
|
unsigned GlobalAlign32 = ST.hasUnalignedBufferAccess() ? 0 : 32;
|
|
unsigned GlobalAlign16 = ST.hasUnalignedBufferAccess() ? 0 : 16;
|
|
unsigned GlobalAlign8 = ST.hasUnalignedBufferAccess() ? 0 : 8;
|
|
|
|
// TODO: Refine based on subtargets which support unaligned access or 128-bit
|
|
// LDS
|
|
// TODO: Unsupported flat for SI.
|
|
|
|
for (unsigned Op : {G_LOAD, G_STORE}) {
|
|
const bool IsStore = Op == G_STORE;
|
|
|
|
auto &Actions = getActionDefinitionsBuilder(Op);
|
|
// Whitelist the common cases.
|
|
// TODO: Loads to s16 on gfx9
|
|
Actions.legalForTypesWithMemDesc({{S32, GlobalPtr, 32, GlobalAlign32},
|
|
{V2S32, GlobalPtr, 64, GlobalAlign32},
|
|
{V4S32, GlobalPtr, 128, GlobalAlign32},
|
|
{S128, GlobalPtr, 128, GlobalAlign32},
|
|
{S64, GlobalPtr, 64, GlobalAlign32},
|
|
{V2S64, GlobalPtr, 128, GlobalAlign32},
|
|
{V2S16, GlobalPtr, 32, GlobalAlign32},
|
|
{S32, GlobalPtr, 8, GlobalAlign8},
|
|
{S32, GlobalPtr, 16, GlobalAlign16},
|
|
|
|
{S32, LocalPtr, 32, 32},
|
|
{S64, LocalPtr, 64, 32},
|
|
{V2S32, LocalPtr, 64, 32},
|
|
{S32, LocalPtr, 8, 8},
|
|
{S32, LocalPtr, 16, 16},
|
|
{V2S16, LocalPtr, 32, 32},
|
|
|
|
{S32, PrivatePtr, 32, 32},
|
|
{S32, PrivatePtr, 8, 8},
|
|
{S32, PrivatePtr, 16, 16},
|
|
{V2S16, PrivatePtr, 32, 32},
|
|
|
|
{S32, FlatPtr, 32, GlobalAlign32},
|
|
{S32, FlatPtr, 16, GlobalAlign16},
|
|
{S32, FlatPtr, 8, GlobalAlign8},
|
|
{V2S16, FlatPtr, 32, GlobalAlign32},
|
|
|
|
{S32, ConstantPtr, 32, GlobalAlign32},
|
|
{V2S32, ConstantPtr, 64, GlobalAlign32},
|
|
{V4S32, ConstantPtr, 128, GlobalAlign32},
|
|
{S64, ConstantPtr, 64, GlobalAlign32},
|
|
{S128, ConstantPtr, 128, GlobalAlign32},
|
|
{V2S32, ConstantPtr, 32, GlobalAlign32}});
|
|
Actions
|
|
.customIf(typeIs(1, Constant32Ptr))
|
|
// Widen suitably aligned loads by loading extra elements.
|
|
.moreElementsIf([=](const LegalityQuery &Query) {
|
|
const LLT Ty = Query.Types[0];
|
|
return Op == G_LOAD && Ty.isVector() &&
|
|
shouldWidenLoadResult(Query);
|
|
}, moreElementsToNextPow2(0))
|
|
.widenScalarIf([=](const LegalityQuery &Query) {
|
|
const LLT Ty = Query.Types[0];
|
|
return Op == G_LOAD && !Ty.isVector() &&
|
|
shouldWidenLoadResult(Query);
|
|
}, widenScalarOrEltToNextPow2(0))
|
|
.narrowScalarIf(
|
|
[=](const LegalityQuery &Query) -> bool {
|
|
return !Query.Types[0].isVector() &&
|
|
needToSplitMemOp(Query, Op == G_LOAD);
|
|
},
|
|
[=](const LegalityQuery &Query) -> std::pair<unsigned, LLT> {
|
|
const LLT DstTy = Query.Types[0];
|
|
const LLT PtrTy = Query.Types[1];
|
|
|
|
const unsigned DstSize = DstTy.getSizeInBits();
|
|
unsigned MemSize = Query.MMODescrs[0].SizeInBits;
|
|
|
|
// Split extloads.
|
|
if (DstSize > MemSize)
|
|
return std::make_pair(0, LLT::scalar(MemSize));
|
|
|
|
if (!isPowerOf2_32(DstSize)) {
|
|
// We're probably decomposing an odd sized store. Try to split
|
|
// to the widest type. TODO: Account for alignment. As-is it
|
|
// should be OK, since the new parts will be further legalized.
|
|
unsigned FloorSize = PowerOf2Floor(DstSize);
|
|
return std::make_pair(0, LLT::scalar(FloorSize));
|
|
}
|
|
|
|
if (DstSize > 32 && (DstSize % 32 != 0)) {
|
|
// FIXME: Need a way to specify non-extload of larger size if
|
|
// suitably aligned.
|
|
return std::make_pair(0, LLT::scalar(32 * (DstSize / 32)));
|
|
}
|
|
|
|
unsigned MaxSize = maxSizeForAddrSpace(PtrTy.getAddressSpace(),
|
|
Op == G_LOAD);
|
|
if (MemSize > MaxSize)
|
|
return std::make_pair(0, LLT::scalar(MaxSize));
|
|
|
|
unsigned Align = Query.MMODescrs[0].AlignInBits;
|
|
return std::make_pair(0, LLT::scalar(Align));
|
|
})
|
|
.fewerElementsIf(
|
|
[=](const LegalityQuery &Query) -> bool {
|
|
return Query.Types[0].isVector() &&
|
|
needToSplitMemOp(Query, Op == G_LOAD);
|
|
},
|
|
[=](const LegalityQuery &Query) -> std::pair<unsigned, LLT> {
|
|
const LLT DstTy = Query.Types[0];
|
|
const LLT PtrTy = Query.Types[1];
|
|
|
|
LLT EltTy = DstTy.getElementType();
|
|
unsigned MaxSize = maxSizeForAddrSpace(PtrTy.getAddressSpace(),
|
|
Op == G_LOAD);
|
|
|
|
// FIXME: Handle widened to power of 2 results better. This ends
|
|
// up scalarizing.
|
|
// FIXME: 3 element stores scalarized on SI
|
|
|
|
// Split if it's too large for the address space.
|
|
if (Query.MMODescrs[0].SizeInBits > MaxSize) {
|
|
unsigned NumElts = DstTy.getNumElements();
|
|
unsigned EltSize = EltTy.getSizeInBits();
|
|
|
|
if (MaxSize % EltSize == 0) {
|
|
return std::make_pair(
|
|
0, LLT::scalarOrVector(MaxSize / EltSize, EltTy));
|
|
}
|
|
|
|
unsigned NumPieces = Query.MMODescrs[0].SizeInBits / MaxSize;
|
|
|
|
// FIXME: Refine when odd breakdowns handled
|
|
// The scalars will need to be re-legalized.
|
|
if (NumPieces == 1 || NumPieces >= NumElts ||
|
|
NumElts % NumPieces != 0)
|
|
return std::make_pair(0, EltTy);
|
|
|
|
return std::make_pair(0,
|
|
LLT::vector(NumElts / NumPieces, EltTy));
|
|
}
|
|
|
|
// FIXME: We could probably handle weird extending loads better.
|
|
unsigned MemSize = Query.MMODescrs[0].SizeInBits;
|
|
if (DstTy.getSizeInBits() > MemSize)
|
|
return std::make_pair(0, EltTy);
|
|
|
|
unsigned EltSize = EltTy.getSizeInBits();
|
|
unsigned DstSize = DstTy.getSizeInBits();
|
|
if (!isPowerOf2_32(DstSize)) {
|
|
// We're probably decomposing an odd sized store. Try to split
|
|
// to the widest type. TODO: Account for alignment. As-is it
|
|
// should be OK, since the new parts will be further legalized.
|
|
unsigned FloorSize = PowerOf2Floor(DstSize);
|
|
return std::make_pair(
|
|
0, LLT::scalarOrVector(FloorSize / EltSize, EltTy));
|
|
}
|
|
|
|
// Need to split because of alignment.
|
|
unsigned Align = Query.MMODescrs[0].AlignInBits;
|
|
if (EltSize > Align &&
|
|
(EltSize / Align < DstTy.getNumElements())) {
|
|
return std::make_pair(0, LLT::vector(EltSize / Align, EltTy));
|
|
}
|
|
|
|
// May need relegalization for the scalars.
|
|
return std::make_pair(0, EltTy);
|
|
})
|
|
.minScalar(0, S32);
|
|
|
|
if (IsStore)
|
|
Actions.narrowScalarIf(isWideScalarTruncStore(0), changeTo(0, S32));
|
|
|
|
// TODO: Need a bitcast lower option?
|
|
Actions
|
|
.legalIf([=](const LegalityQuery &Query) {
|
|
const LLT Ty0 = Query.Types[0];
|
|
unsigned Size = Ty0.getSizeInBits();
|
|
unsigned MemSize = Query.MMODescrs[0].SizeInBits;
|
|
unsigned Align = Query.MMODescrs[0].AlignInBits;
|
|
|
|
// FIXME: Widening store from alignment not valid.
|
|
if (MemSize < Size)
|
|
MemSize = std::max(MemSize, Align);
|
|
|
|
// No extending vector loads.
|
|
if (Size > MemSize && Ty0.isVector())
|
|
return false;
|
|
|
|
switch (MemSize) {
|
|
case 8:
|
|
case 16:
|
|
return Size == 32;
|
|
case 32:
|
|
case 64:
|
|
case 128:
|
|
return true;
|
|
case 96:
|
|
return ST.hasDwordx3LoadStores();
|
|
case 256:
|
|
case 512:
|
|
return true;
|
|
default:
|
|
return false;
|
|
}
|
|
})
|
|
.widenScalarToNextPow2(0)
|
|
.moreElementsIf(vectorSmallerThan(0, 32), moreEltsToNext32Bit(0));
|
|
}
|
|
|
|
auto &ExtLoads = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
|
|
.legalForTypesWithMemDesc({{S32, GlobalPtr, 8, 8},
|
|
{S32, GlobalPtr, 16, 2 * 8},
|
|
{S32, LocalPtr, 8, 8},
|
|
{S32, LocalPtr, 16, 16},
|
|
{S32, PrivatePtr, 8, 8},
|
|
{S32, PrivatePtr, 16, 16},
|
|
{S32, ConstantPtr, 8, 8},
|
|
{S32, ConstantPtr, 16, 2 * 8}});
|
|
if (ST.hasFlatAddressSpace()) {
|
|
ExtLoads.legalForTypesWithMemDesc(
|
|
{{S32, FlatPtr, 8, 8}, {S32, FlatPtr, 16, 16}});
|
|
}
|
|
|
|
ExtLoads.clampScalar(0, S32, S32)
|
|
.widenScalarToNextPow2(0)
|
|
.unsupportedIfMemSizeNotPow2()
|
|
.lower();
|
|
|
|
auto &Atomics = getActionDefinitionsBuilder(
|
|
{G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB,
|
|
G_ATOMICRMW_AND, G_ATOMICRMW_OR, G_ATOMICRMW_XOR,
|
|
G_ATOMICRMW_MAX, G_ATOMICRMW_MIN, G_ATOMICRMW_UMAX,
|
|
G_ATOMICRMW_UMIN})
|
|
.legalFor({{S32, GlobalPtr}, {S32, LocalPtr},
|
|
{S64, GlobalPtr}, {S64, LocalPtr}});
|
|
if (ST.hasFlatAddressSpace()) {
|
|
Atomics.legalFor({{S32, FlatPtr}, {S64, FlatPtr}});
|
|
}
|
|
|
|
getActionDefinitionsBuilder(G_ATOMICRMW_FADD)
|
|
.legalFor({{S32, LocalPtr}});
|
|
|
|
// BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling, and output
|
|
// demarshalling
|
|
getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG)
|
|
.customFor({{S32, GlobalPtr}, {S64, GlobalPtr},
|
|
{S32, FlatPtr}, {S64, FlatPtr}})
|
|
.legalFor({{S32, LocalPtr}, {S64, LocalPtr},
|
|
{S32, RegionPtr}, {S64, RegionPtr}});
|
|
// TODO: Pointer types, any 32-bit or 64-bit vector
|
|
|
|
// Condition should be s32 for scalar, s1 for vector.
|
|
getActionDefinitionsBuilder(G_SELECT)
|
|
.legalForCartesianProduct({S32, S64, S16, V2S32, V2S16, V4S16,
|
|
GlobalPtr, LocalPtr, FlatPtr, PrivatePtr,
|
|
LLT::vector(2, LocalPtr), LLT::vector(2, PrivatePtr)}, {S1, S32})
|
|
.clampScalar(0, S16, S64)
|
|
.moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
|
|
.fewerElementsIf(numElementsNotEven(0), scalarize(0))
|
|
.scalarize(1)
|
|
.clampMaxNumElements(0, S32, 2)
|
|
.clampMaxNumElements(0, LocalPtr, 2)
|
|
.clampMaxNumElements(0, PrivatePtr, 2)
|
|
.scalarize(0)
|
|
.widenScalarToNextPow2(0)
|
|
.legalIf(all(isPointer(0), typeInSet(1, {S1, S32})));
|
|
|
|
// TODO: Only the low 4/5/6 bits of the shift amount are observed, so we can
|
|
// be more flexible with the shift amount type.
|
|
auto &Shifts = getActionDefinitionsBuilder({G_SHL, G_LSHR, G_ASHR})
|
|
.legalFor({{S32, S32}, {S64, S32}});
|
|
if (ST.has16BitInsts()) {
|
|
if (ST.hasVOP3PInsts()) {
|
|
Shifts.legalFor({{S16, S32}, {S16, S16}, {V2S16, V2S16}})
|
|
.clampMaxNumElements(0, S16, 2);
|
|
} else
|
|
Shifts.legalFor({{S16, S32}, {S16, S16}});
|
|
|
|
// TODO: Support 16-bit shift amounts
|
|
Shifts.clampScalar(1, S32, S32);
|
|
Shifts.clampScalar(0, S16, S64);
|
|
Shifts.widenScalarToNextPow2(0, 16);
|
|
} else {
|
|
// Make sure we legalize the shift amount type first, as the general
|
|
// expansion for the shifted type will produce much worse code if it hasn't
|
|
// been truncated already.
|
|
Shifts.clampScalar(1, S32, S32);
|
|
Shifts.clampScalar(0, S32, S64);
|
|
Shifts.widenScalarToNextPow2(0, 32);
|
|
}
|
|
Shifts.scalarize(0);
|
|
|
|
for (unsigned Op : {G_EXTRACT_VECTOR_ELT, G_INSERT_VECTOR_ELT}) {
|
|
unsigned VecTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 1 : 0;
|
|
unsigned EltTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 0 : 1;
|
|
unsigned IdxTypeIdx = 2;
|
|
|
|
getActionDefinitionsBuilder(Op)
|
|
.customIf([=](const LegalityQuery &Query) {
|
|
const LLT EltTy = Query.Types[EltTypeIdx];
|
|
const LLT VecTy = Query.Types[VecTypeIdx];
|
|
const LLT IdxTy = Query.Types[IdxTypeIdx];
|
|
return (EltTy.getSizeInBits() == 16 ||
|
|
EltTy.getSizeInBits() % 32 == 0) &&
|
|
VecTy.getSizeInBits() % 32 == 0 &&
|
|
VecTy.getSizeInBits() <= 1024 &&
|
|
IdxTy.getSizeInBits() == 32;
|
|
})
|
|
.clampScalar(EltTypeIdx, S32, S64)
|
|
.clampScalar(VecTypeIdx, S32, S64)
|
|
.clampScalar(IdxTypeIdx, S32, S32);
|
|
}
|
|
|
|
getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
|
|
.unsupportedIf([=](const LegalityQuery &Query) {
|
|
const LLT &EltTy = Query.Types[1].getElementType();
|
|
return Query.Types[0] != EltTy;
|
|
});
|
|
|
|
for (unsigned Op : {G_EXTRACT, G_INSERT}) {
|
|
unsigned BigTyIdx = Op == G_EXTRACT ? 1 : 0;
|
|
unsigned LitTyIdx = Op == G_EXTRACT ? 0 : 1;
|
|
|
|
// FIXME: Doesn't handle extract of illegal sizes.
|
|
getActionDefinitionsBuilder(Op)
|
|
.lowerIf(all(typeIs(LitTyIdx, S16), sizeIs(BigTyIdx, 32)))
|
|
// FIXME: Multiples of 16 should not be legal.
|
|
.legalIf([=](const LegalityQuery &Query) {
|
|
const LLT BigTy = Query.Types[BigTyIdx];
|
|
const LLT LitTy = Query.Types[LitTyIdx];
|
|
return (BigTy.getSizeInBits() % 32 == 0) &&
|
|
(LitTy.getSizeInBits() % 16 == 0);
|
|
})
|
|
.widenScalarIf(
|
|
[=](const LegalityQuery &Query) {
|
|
const LLT BigTy = Query.Types[BigTyIdx];
|
|
return (BigTy.getScalarSizeInBits() < 16);
|
|
},
|
|
LegalizeMutations::widenScalarOrEltToNextPow2(BigTyIdx, 16))
|
|
.widenScalarIf(
|
|
[=](const LegalityQuery &Query) {
|
|
const LLT LitTy = Query.Types[LitTyIdx];
|
|
return (LitTy.getScalarSizeInBits() < 16);
|
|
},
|
|
LegalizeMutations::widenScalarOrEltToNextPow2(LitTyIdx, 16))
|
|
.moreElementsIf(isSmallOddVector(BigTyIdx), oneMoreElement(BigTyIdx))
|
|
.widenScalarToNextPow2(BigTyIdx, 32);
|
|
|
|
}
|
|
|
|
auto &BuildVector = getActionDefinitionsBuilder(G_BUILD_VECTOR)
|
|
.legalForCartesianProduct(AllS32Vectors, {S32})
|
|
.legalForCartesianProduct(AllS64Vectors, {S64})
|
|
.clampNumElements(0, V16S32, V32S32)
|
|
.clampNumElements(0, V2S64, V16S64)
|
|
.fewerElementsIf(isWideVec16(0), changeTo(0, V2S16));
|
|
|
|
if (ST.hasScalarPackInsts()) {
|
|
BuildVector
|
|
// FIXME: Should probably widen s1 vectors straight to s32
|
|
.minScalarOrElt(0, S16)
|
|
// Widen source elements and produce a G_BUILD_VECTOR_TRUNC
|
|
.minScalar(1, S32);
|
|
|
|
getActionDefinitionsBuilder(G_BUILD_VECTOR_TRUNC)
|
|
.legalFor({V2S16, S32})
|
|
.lower();
|
|
BuildVector.minScalarOrElt(0, S32);
|
|
} else {
|
|
BuildVector.customFor({V2S16, S16});
|
|
BuildVector.minScalarOrElt(0, S32);
|
|
|
|
getActionDefinitionsBuilder(G_BUILD_VECTOR_TRUNC)
|
|
.customFor({V2S16, S32})
|
|
.lower();
|
|
}
|
|
|
|
BuildVector.legalIf(isRegisterType(0));
|
|
|
|
// FIXME: Clamp maximum size
|
|
getActionDefinitionsBuilder(G_CONCAT_VECTORS)
|
|
.legalIf(isRegisterType(0));
|
|
|
|
// TODO: Don't fully scalarize v2s16 pieces? Or combine out thosse
|
|
// pre-legalize.
|
|
if (ST.hasVOP3PInsts()) {
|
|
getActionDefinitionsBuilder(G_SHUFFLE_VECTOR)
|
|
.customFor({V2S16, V2S16})
|
|
.lower();
|
|
} else
|
|
getActionDefinitionsBuilder(G_SHUFFLE_VECTOR).lower();
|
|
|
|
// Merge/Unmerge
|
|
for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
|
|
unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
|
|
unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
|
|
|
|
auto notValidElt = [=](const LegalityQuery &Query, unsigned TypeIdx) {
|
|
const LLT &Ty = Query.Types[TypeIdx];
|
|
if (Ty.isVector()) {
|
|
const LLT &EltTy = Ty.getElementType();
|
|
if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 64)
|
|
return true;
|
|
if (!isPowerOf2_32(EltTy.getSizeInBits()))
|
|
return true;
|
|
}
|
|
return false;
|
|
};
|
|
|
|
auto &Builder = getActionDefinitionsBuilder(Op)
|
|
// Try to widen to s16 first for small types.
|
|
// TODO: Only do this on targets with legal s16 shifts
|
|
.minScalarOrEltIf(narrowerThan(LitTyIdx, 16), LitTyIdx, S16)
|
|
|
|
.widenScalarToNextPow2(LitTyIdx, /*Min*/ 16)
|
|
.lowerFor({{S16, V2S16}})
|
|
.moreElementsIf(isSmallOddVector(BigTyIdx), oneMoreElement(BigTyIdx))
|
|
.fewerElementsIf(all(typeIs(0, S16), vectorWiderThan(1, 32),
|
|
elementTypeIs(1, S16)),
|
|
changeTo(1, V2S16))
|
|
// Clamp the little scalar to s8-s256 and make it a power of 2. It's not
|
|
// worth considering the multiples of 64 since 2*192 and 2*384 are not
|
|
// valid.
|
|
.clampScalar(LitTyIdx, S32, S256)
|
|
.widenScalarToNextPow2(LitTyIdx, /*Min*/ 32)
|
|
// Break up vectors with weird elements into scalars
|
|
.fewerElementsIf(
|
|
[=](const LegalityQuery &Query) { return notValidElt(Query, 0); },
|
|
scalarize(0))
|
|
.fewerElementsIf(
|
|
[=](const LegalityQuery &Query) { return notValidElt(Query, 1); },
|
|
scalarize(1))
|
|
.clampScalar(BigTyIdx, S32, S1024);
|
|
|
|
if (Op == G_MERGE_VALUES) {
|
|
Builder.widenScalarIf(
|
|
// TODO: Use 16-bit shifts if legal for 8-bit values?
|
|
[=](const LegalityQuery &Query) {
|
|
const LLT Ty = Query.Types[LitTyIdx];
|
|
return Ty.getSizeInBits() < 32;
|
|
},
|
|
changeTo(LitTyIdx, S32));
|
|
}
|
|
|
|
Builder.widenScalarIf(
|
|
[=](const LegalityQuery &Query) {
|
|
const LLT Ty = Query.Types[BigTyIdx];
|
|
return !isPowerOf2_32(Ty.getSizeInBits()) &&
|
|
Ty.getSizeInBits() % 16 != 0;
|
|
},
|
|
[=](const LegalityQuery &Query) {
|
|
// Pick the next power of 2, or a multiple of 64 over 128.
|
|
// Whichever is smaller.
|
|
const LLT &Ty = Query.Types[BigTyIdx];
|
|
unsigned NewSizeInBits = 1 << Log2_32_Ceil(Ty.getSizeInBits() + 1);
|
|
if (NewSizeInBits >= 256) {
|
|
unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
|
|
if (RoundedTo < NewSizeInBits)
|
|
NewSizeInBits = RoundedTo;
|
|
}
|
|
return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
|
|
})
|
|
.legalIf([=](const LegalityQuery &Query) {
|
|
const LLT &BigTy = Query.Types[BigTyIdx];
|
|
const LLT &LitTy = Query.Types[LitTyIdx];
|
|
|
|
if (BigTy.isVector() && BigTy.getSizeInBits() < 32)
|
|
return false;
|
|
if (LitTy.isVector() && LitTy.getSizeInBits() < 32)
|
|
return false;
|
|
|
|
return BigTy.getSizeInBits() % 16 == 0 &&
|
|
LitTy.getSizeInBits() % 16 == 0 &&
|
|
BigTy.getSizeInBits() <= 1024;
|
|
})
|
|
// Any vectors left are the wrong size. Scalarize them.
|
|
.scalarize(0)
|
|
.scalarize(1);
|
|
}
|
|
|
|
// S64 is only legal on SALU, and needs to be broken into 32-bit elements in
|
|
// RegBankSelect.
|
|
auto &SextInReg = getActionDefinitionsBuilder(G_SEXT_INREG)
|
|
.legalFor({{S32}, {S64}});
|
|
|
|
if (ST.hasVOP3PInsts()) {
|
|
SextInReg.lowerFor({{V2S16}})
|
|
// Prefer to reduce vector widths for 16-bit vectors before lowering, to
|
|
// get more vector shift opportunities, since we'll get those when
|
|
// expanded.
|
|
.fewerElementsIf(elementTypeIs(0, S16), changeTo(0, V2S16));
|
|
} else if (ST.has16BitInsts()) {
|
|
SextInReg.lowerFor({{S32}, {S64}, {S16}});
|
|
} else {
|
|
// Prefer to promote to s32 before lowering if we don't have 16-bit
|
|
// shifts. This avoid a lot of intermediate truncate and extend operations.
|
|
SextInReg.lowerFor({{S32}, {S64}});
|
|
}
|
|
|
|
SextInReg
|
|
.scalarize(0)
|
|
.clampScalar(0, S32, S64)
|
|
.lower();
|
|
|
|
getActionDefinitionsBuilder(G_READCYCLECOUNTER)
|
|
.legalFor({S64});
|
|
|
|
getActionDefinitionsBuilder({
|
|
// TODO: Verify V_BFI_B32 is generated from expanded bit ops
|
|
G_FCOPYSIGN,
|
|
|
|
G_ATOMIC_CMPXCHG_WITH_SUCCESS,
|
|
G_READ_REGISTER,
|
|
G_WRITE_REGISTER,
|
|
|
|
G_SADDO, G_SSUBO,
|
|
|
|
// TODO: Implement
|
|
G_FMINIMUM, G_FMAXIMUM
|
|
}).lower();
|
|
|
|
getActionDefinitionsBuilder({G_VASTART, G_VAARG, G_BRJT, G_JUMP_TABLE,
|
|
G_DYN_STACKALLOC, G_INDEXED_LOAD, G_INDEXED_SEXTLOAD,
|
|
G_INDEXED_ZEXTLOAD, G_INDEXED_STORE})
|
|
.unsupported();
|
|
|
|
computeTables();
|
|
verify(*ST.getInstrInfo());
|
|
}
|
|
|
|
bool AMDGPULegalizerInfo::legalizeCustom(MachineInstr &MI,
|
|
MachineRegisterInfo &MRI,
|
|
MachineIRBuilder &B,
|
|
GISelChangeObserver &Observer) const {
|
|
switch (MI.getOpcode()) {
|
|
case TargetOpcode::G_ADDRSPACE_CAST:
|
|
return legalizeAddrSpaceCast(MI, MRI, B);
|
|
case TargetOpcode::G_FRINT:
|
|
return legalizeFrint(MI, MRI, B);
|
|
case TargetOpcode::G_FCEIL:
|
|
return legalizeFceil(MI, MRI, B);
|
|
case TargetOpcode::G_INTRINSIC_TRUNC:
|
|
return legalizeIntrinsicTrunc(MI, MRI, B);
|
|
case TargetOpcode::G_SITOFP:
|
|
return legalizeITOFP(MI, MRI, B, true);
|
|
case TargetOpcode::G_UITOFP:
|
|
return legalizeITOFP(MI, MRI, B, false);
|
|
case TargetOpcode::G_FPTOSI:
|
|
return legalizeFPTOI(MI, MRI, B, true);
|
|
case TargetOpcode::G_FPTOUI:
|
|
return legalizeFPTOI(MI, MRI, B, false);
|
|
case TargetOpcode::G_FMINNUM:
|
|
case TargetOpcode::G_FMAXNUM:
|
|
case TargetOpcode::G_FMINNUM_IEEE:
|
|
case TargetOpcode::G_FMAXNUM_IEEE:
|
|
return legalizeMinNumMaxNum(MI, MRI, B);
|
|
case TargetOpcode::G_EXTRACT_VECTOR_ELT:
|
|
return legalizeExtractVectorElt(MI, MRI, B);
|
|
case TargetOpcode::G_INSERT_VECTOR_ELT:
|
|
return legalizeInsertVectorElt(MI, MRI, B);
|
|
case TargetOpcode::G_SHUFFLE_VECTOR:
|
|
return legalizeShuffleVector(MI, MRI, B);
|
|
case TargetOpcode::G_FSIN:
|
|
case TargetOpcode::G_FCOS:
|
|
return legalizeSinCos(MI, MRI, B);
|
|
case TargetOpcode::G_GLOBAL_VALUE:
|
|
return legalizeGlobalValue(MI, MRI, B);
|
|
case TargetOpcode::G_LOAD:
|
|
return legalizeLoad(MI, MRI, B, Observer);
|
|
case TargetOpcode::G_FMAD:
|
|
return legalizeFMad(MI, MRI, B);
|
|
case TargetOpcode::G_FDIV:
|
|
return legalizeFDIV(MI, MRI, B);
|
|
case TargetOpcode::G_UDIV:
|
|
case TargetOpcode::G_UREM:
|
|
return legalizeUDIV_UREM(MI, MRI, B);
|
|
case TargetOpcode::G_SDIV:
|
|
case TargetOpcode::G_SREM:
|
|
return legalizeSDIV_SREM(MI, MRI, B);
|
|
case TargetOpcode::G_ATOMIC_CMPXCHG:
|
|
return legalizeAtomicCmpXChg(MI, MRI, B);
|
|
case TargetOpcode::G_FLOG:
|
|
return legalizeFlog(MI, B, 1.0f / numbers::log2ef);
|
|
case TargetOpcode::G_FLOG10:
|
|
return legalizeFlog(MI, B, numbers::ln2f / numbers::ln10f);
|
|
case TargetOpcode::G_FEXP:
|
|
return legalizeFExp(MI, B);
|
|
case TargetOpcode::G_FPOW:
|
|
return legalizeFPow(MI, B);
|
|
case TargetOpcode::G_FFLOOR:
|
|
return legalizeFFloor(MI, MRI, B);
|
|
case TargetOpcode::G_BUILD_VECTOR:
|
|
return legalizeBuildVector(MI, MRI, B);
|
|
default:
|
|
return false;
|
|
}
|
|
|
|
llvm_unreachable("expected switch to return");
|
|
}
|
|
|
|
Register AMDGPULegalizerInfo::getSegmentAperture(
|
|
unsigned AS,
|
|
MachineRegisterInfo &MRI,
|
|
MachineIRBuilder &B) const {
|
|
MachineFunction &MF = B.getMF();
|
|
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
|
|
const LLT S32 = LLT::scalar(32);
|
|
|
|
assert(AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::PRIVATE_ADDRESS);
|
|
|
|
if (ST.hasApertureRegs()) {
|
|
// FIXME: Use inline constants (src_{shared, private}_base) instead of
|
|
// getreg.
|
|
unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
|
|
AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
|
|
AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
|
|
unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
|
|
AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
|
|
AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
|
|
unsigned Encoding =
|
|
AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
|
|
Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
|
|
WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;
|
|
|
|
Register GetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
|
|
|
|
B.buildInstr(AMDGPU::S_GETREG_B32)
|
|
.addDef(GetReg)
|
|
.addImm(Encoding);
|
|
MRI.setType(GetReg, S32);
|
|
|
|
auto ShiftAmt = B.buildConstant(S32, WidthM1 + 1);
|
|
return B.buildShl(S32, GetReg, ShiftAmt).getReg(0);
|
|
}
|
|
|
|
Register QueuePtr = MRI.createGenericVirtualRegister(
|
|
LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
|
|
|
|
const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
|
|
if (!loadInputValue(QueuePtr, B, &MFI->getArgInfo().QueuePtr))
|
|
return Register();
|
|
|
|
// Offset into amd_queue_t for group_segment_aperture_base_hi /
|
|
// private_segment_aperture_base_hi.
|
|
uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;
|
|
|
|
// TODO: can we be smarter about machine pointer info?
|
|
MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
|
|
MachineMemOperand *MMO = MF.getMachineMemOperand(
|
|
PtrInfo,
|
|
MachineMemOperand::MOLoad |
|
|
MachineMemOperand::MODereferenceable |
|
|
MachineMemOperand::MOInvariant,
|
|
4,
|
|
MinAlign(64, StructOffset));
|
|
|
|
Register LoadAddr;
|
|
|
|
B.materializePtrAdd(LoadAddr, QueuePtr, LLT::scalar(64), StructOffset);
|
|
return B.buildLoad(S32, LoadAddr, *MMO).getReg(0);
|
|
}
|
|
|
|
bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
|
|
MachineInstr &MI, MachineRegisterInfo &MRI,
|
|
MachineIRBuilder &B) const {
|
|
MachineFunction &MF = B.getMF();
|
|
|
|
B.setInstr(MI);
|
|
|
|
const LLT S32 = LLT::scalar(32);
|
|
Register Dst = MI.getOperand(0).getReg();
|
|
Register Src = MI.getOperand(1).getReg();
|
|
|
|
LLT DstTy = MRI.getType(Dst);
|
|
LLT SrcTy = MRI.getType(Src);
|
|
unsigned DestAS = DstTy.getAddressSpace();
|
|
unsigned SrcAS = SrcTy.getAddressSpace();
|
|
|
|
// TODO: Avoid reloading from the queue ptr for each cast, or at least each
|
|
// vector element.
|
|
assert(!DstTy.isVector());
|
|
|
|
const AMDGPUTargetMachine &TM
|
|
= static_cast<const AMDGPUTargetMachine &>(MF.getTarget());
|
|
|
|
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
|
|
if (ST.getTargetLowering()->isNoopAddrSpaceCast(SrcAS, DestAS)) {
|
|
MI.setDesc(B.getTII().get(TargetOpcode::G_BITCAST));
|
|
return true;
|
|
}
|
|
|
|
if (DestAS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
|
|
// Truncate.
|
|
B.buildExtract(Dst, Src, 0);
|
|
MI.eraseFromParent();
|
|
return true;
|
|
}
|
|
|
|
if (SrcAS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
|
|
const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
|
|
uint32_t AddrHiVal = Info->get32BitAddressHighBits();
|
|
|
|
// FIXME: This is a bit ugly due to creating a merge of 2 pointers to
|
|
// another. Merge operands are required to be the same type, but creating an
|
|
// extra ptrtoint would be kind of pointless.
|
|
auto HighAddr = B.buildConstant(
|
|
LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS_32BIT, 32), AddrHiVal);
|
|
B.buildMerge(Dst, {Src, HighAddr});
|
|
MI.eraseFromParent();
|
|
return true;
|
|
}
|
|
|
|
if (SrcAS == AMDGPUAS::FLAT_ADDRESS) {
|
|
assert(DestAS == AMDGPUAS::LOCAL_ADDRESS ||
|
|
DestAS == AMDGPUAS::PRIVATE_ADDRESS);
|
|
unsigned NullVal = TM.getNullPointerValue(DestAS);
|
|
|
|
auto SegmentNull = B.buildConstant(DstTy, NullVal);
|
|
auto FlatNull = B.buildConstant(SrcTy, 0);
|
|
|
|
// Extract low 32-bits of the pointer.
|
|
auto PtrLo32 = B.buildExtract(DstTy, Src, 0);
|
|
|
|
auto CmpRes =
|
|
B.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Src, FlatNull.getReg(0));
|
|
B.buildSelect(Dst, CmpRes, PtrLo32, SegmentNull.getReg(0));
|
|
|
|
MI.eraseFromParent();
|
|
return true;
|
|
}
|
|
|
|
if (SrcAS != AMDGPUAS::LOCAL_ADDRESS && SrcAS != AMDGPUAS::PRIVATE_ADDRESS)
|
|
return false;
|
|
|
|
if (!ST.hasFlatAddressSpace())
|
|
return false;
|
|
|
|
auto SegmentNull =
|
|
B.buildConstant(SrcTy, TM.getNullPointerValue(SrcAS));
|
|
auto FlatNull =
|
|
B.buildConstant(DstTy, TM.getNullPointerValue(DestAS));
|
|
|
|
Register ApertureReg = getSegmentAperture(SrcAS, MRI, B);
|
|
if (!ApertureReg.isValid())
|
|
return false;
|
|
|
|
auto CmpRes =
|
|
B.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Src, SegmentNull.getReg(0));
|
|
|
|
// Coerce the type of the low half of the result so we can use merge_values.
|
|
Register SrcAsInt = B.buildPtrToInt(S32, Src).getReg(0);
|
|
|
|
// TODO: Should we allow mismatched types but matching sizes in merges to
|
|
// avoid the ptrtoint?
|
|
auto BuildPtr = B.buildMerge(DstTy, {SrcAsInt, ApertureReg});
|
|
B.buildSelect(Dst, CmpRes, BuildPtr, FlatNull);
|
|
|
|
MI.eraseFromParent();
|
|
return true;
|
|
}
|
|
|
|
bool AMDGPULegalizerInfo::legalizeFrint(
|
|
MachineInstr &MI, MachineRegisterInfo &MRI,
|
|
MachineIRBuilder &B) const {
|
|
B.setInstr(MI);
|
|
|
|
Register Src = MI.getOperand(1).getReg();
|
|
LLT Ty = MRI.getType(Src);
|
|
assert(Ty.isScalar() && Ty.getSizeInBits() == 64);
|
|
|
|
APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52");
|
|
APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51");
|
|
|
|
auto C1 = B.buildFConstant(Ty, C1Val);
|
|
auto CopySign = B.buildFCopysign(Ty, C1, Src);
|
|
|
|
// TODO: Should this propagate fast-math-flags?
|
|
auto Tmp1 = B.buildFAdd(Ty, Src, CopySign);
|
|
auto Tmp2 = B.buildFSub(Ty, Tmp1, CopySign);
|
|
|
|
auto C2 = B.buildFConstant(Ty, C2Val);
|
|
auto Fabs = B.buildFAbs(Ty, Src);
|
|
|
|
auto Cond = B.buildFCmp(CmpInst::FCMP_OGT, LLT::scalar(1), Fabs, C2);
|
|
B.buildSelect(MI.getOperand(0).getReg(), Cond, Src, Tmp2);
|
|
return true;
|
|
}
|
|
|
|
bool AMDGPULegalizerInfo::legalizeFceil(
|
|
MachineInstr &MI, MachineRegisterInfo &MRI,
|
|
MachineIRBuilder &B) const {
|
|
B.setInstr(MI);
|
|
|
|
const LLT S1 = LLT::scalar(1);
|
|
const LLT S64 = LLT::scalar(64);
|
|
|
|
Register Src = MI.getOperand(1).getReg();
|
|
assert(MRI.getType(Src) == S64);
|
|
|
|
// result = trunc(src)
|
|
// if (src > 0.0 && src != result)
|
|
// result += 1.0
|
|
|
|
auto Trunc = B.buildIntrinsicTrunc(S64, Src);
|
|
|
|
const auto Zero = B.buildFConstant(S64, 0.0);
|
|
const auto One = B.buildFConstant(S64, 1.0);
|
|
auto Lt0 = B.buildFCmp(CmpInst::FCMP_OGT, S1, Src, Zero);
|
|
auto NeTrunc = B.buildFCmp(CmpInst::FCMP_ONE, S1, Src, Trunc);
|
|
auto And = B.buildAnd(S1, Lt0, NeTrunc);
|
|
auto Add = B.buildSelect(S64, And, One, Zero);
|
|
|
|
// TODO: Should this propagate fast-math-flags?
|
|
B.buildFAdd(MI.getOperand(0).getReg(), Trunc, Add);
|
|
return true;
|
|
}
|
|
|
|
static MachineInstrBuilder extractF64Exponent(unsigned Hi,
|
|
MachineIRBuilder &B) {
|
|
const unsigned FractBits = 52;
|
|
const unsigned ExpBits = 11;
|
|
LLT S32 = LLT::scalar(32);
|
|
|
|
auto Const0 = B.buildConstant(S32, FractBits - 32);
|
|
auto Const1 = B.buildConstant(S32, ExpBits);
|
|
|
|
auto ExpPart = B.buildIntrinsic(Intrinsic::amdgcn_ubfe, {S32}, false)
|
|
.addUse(Const0.getReg(0))
|
|
.addUse(Const1.getReg(0));
|
|
|
|
return B.buildSub(S32, ExpPart, B.buildConstant(S32, 1023));
|
|
}
|
|
|
|
bool AMDGPULegalizerInfo::legalizeIntrinsicTrunc(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  B.setInstr(MI);

  const LLT S1 = LLT::scalar(1);
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);

  Register Src = MI.getOperand(1).getReg();
  assert(MRI.getType(Src) == S64);

  // TODO: Should this use extract since the low half is unused?
  auto Unmerge = B.buildUnmerge({S32, S32}, Src);
  Register Hi = Unmerge.getReg(1);

  // Extract the upper half, since this is where we will find the sign and
  // exponent.
  auto Exp = extractF64Exponent(Hi, B);

  const unsigned FractBits = 52;

  // Extract the sign bit.
  const auto SignBitMask = B.buildConstant(S32, UINT32_C(1) << 31);
  auto SignBit = B.buildAnd(S32, Hi, SignBitMask);

  const auto FractMask = B.buildConstant(S64, (UINT64_C(1) << FractBits) - 1);

  const auto Zero32 = B.buildConstant(S32, 0);

  // Extend back to 64-bits.
  auto SignBit64 = B.buildMerge(S64, {Zero32, SignBit});

  auto Shr = B.buildAShr(S64, FractMask, Exp);
  auto Not = B.buildNot(S64, Shr);
  auto Tmp0 = B.buildAnd(S64, Src, Not);
  auto FiftyOne = B.buildConstant(S32, FractBits - 1);

  auto ExpLt0 = B.buildICmp(CmpInst::ICMP_SLT, S1, Exp, Zero32);
  auto ExpGt51 = B.buildICmp(CmpInst::ICMP_SGT, S1, Exp, FiftyOne);

  auto Tmp1 = B.buildSelect(S64, ExpLt0, SignBit64, Tmp0);
  B.buildSelect(MI.getOperand(0).getReg(), ExpGt51, Src, Tmp1);
  return true;
}

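// 64-bit integer to f64 conversion is split into halves:
// result = ldexp((double)hi, 32) + (double)lo, where only the high half is
// converted signed for the signed case.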
bool AMDGPULegalizerInfo::legalizeITOFP(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B, bool Signed) const {
  B.setInstr(MI);

  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();

  const LLT S64 = LLT::scalar(64);
  const LLT S32 = LLT::scalar(32);

  assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S64);

  auto Unmerge = B.buildUnmerge({S32, S32}, Src);

  auto CvtHi = Signed ?
    B.buildSITOFP(S64, Unmerge.getReg(1)) :
    B.buildUITOFP(S64, Unmerge.getReg(1));

  auto CvtLo = B.buildUITOFP(S64, Unmerge.getReg(0));

  auto ThirtyTwo = B.buildConstant(S32, 32);
  auto LdExp = B.buildIntrinsic(Intrinsic::amdgcn_ldexp, {S64}, false)
    .addUse(CvtHi.getReg(0))
    .addUse(ThirtyTwo.getReg(0));

  // TODO: Should this propagate fast-math-flags?
  B.buildFAdd(Dst, LdExp, CvtLo);
  MI.eraseFromParent();
  return true;
}

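// Note on the constants used below: K0 is 2^-32 and K1 is -2^32, so FloorMul
// holds the upper 32 bits of trunc(x), and Fma recovers the remaining low
// 32 bits as trunc(x) - FloorMul * 2^32.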
// TODO: Copied from DAG implementation. Verify logic and document how this
// actually works.
bool AMDGPULegalizerInfo::legalizeFPTOI(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B, bool Signed) const {
  B.setInstr(MI);

  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();

  const LLT S64 = LLT::scalar(64);
  const LLT S32 = LLT::scalar(32);

  assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S64);

  unsigned Flags = MI.getFlags();

  auto Trunc = B.buildIntrinsicTrunc(S64, Src, Flags);
  auto K0 = B.buildFConstant(S64, BitsToDouble(UINT64_C(0x3df0000000000000)));
  auto K1 = B.buildFConstant(S64, BitsToDouble(UINT64_C(0xc1f0000000000000)));

  auto Mul = B.buildFMul(S64, Trunc, K0, Flags);
  auto FloorMul = B.buildFFloor(S64, Mul, Flags);
  auto Fma = B.buildFMA(S64, FloorMul, K1, Trunc, Flags);

  auto Hi = Signed ?
    B.buildFPTOSI(S32, FloorMul) :
    B.buildFPTOUI(S32, FloorMul);
  auto Lo = B.buildFPTOUI(S32, Fma);

  B.buildMerge(Dst, { Lo, Hi });
  MI.eraseFromParent();

  return true;
}

bool AMDGPULegalizerInfo::legalizeMinNumMaxNum(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  MachineFunction &MF = B.getMF();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  const bool IsIEEEOp = MI.getOpcode() == AMDGPU::G_FMINNUM_IEEE ||
                        MI.getOpcode() == AMDGPU::G_FMAXNUM_IEEE;

  // With ieee_mode disabled, the instructions have the correct behavior
  // already for G_FMINNUM/G_FMAXNUM
  if (!MFI->getMode().IEEE)
    return !IsIEEEOp;

  if (IsIEEEOp)
    return true;

  MachineIRBuilder HelperBuilder(MI);
  GISelObserverWrapper DummyObserver;
  LegalizerHelper Helper(MF, DummyObserver, HelperBuilder);
  HelperBuilder.setInstr(MI);
  return Helper.lowerFMinNumMaxNum(MI) == LegalizerHelper::Legalized;
}

bool AMDGPULegalizerInfo::legalizeExtractVectorElt(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  // TODO: Should move some of this into LegalizerHelper.

  // TODO: Promote dynamic indexing of s16 to s32

  // FIXME: Artifact combiner probably should have replaced the truncated
  // constant before this, so we shouldn't need
  // getConstantVRegValWithLookThrough.
  Optional<ValueAndVReg> IdxVal = getConstantVRegValWithLookThrough(
    MI.getOperand(2).getReg(), MRI);
  if (!IdxVal) // Dynamic case will be selected to register indexing.
    return true;

  Register Dst = MI.getOperand(0).getReg();
  Register Vec = MI.getOperand(1).getReg();

  LLT VecTy = MRI.getType(Vec);
  LLT EltTy = VecTy.getElementType();
  assert(EltTy == MRI.getType(Dst));

  B.setInstr(MI);

  if (IdxVal->Value < VecTy.getNumElements())
    B.buildExtract(Dst, Vec, IdxVal->Value * EltTy.getSizeInBits());
  else
    B.buildUndef(Dst);

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeInsertVectorElt(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  // TODO: Should move some of this into LegalizerHelper.

  // TODO: Promote dynamic indexing of s16 to s32

  // FIXME: Artifact combiner probably should have replaced the truncated
  // constant before this, so we shouldn't need
  // getConstantVRegValWithLookThrough.
  Optional<ValueAndVReg> IdxVal = getConstantVRegValWithLookThrough(
    MI.getOperand(3).getReg(), MRI);
  if (!IdxVal) // Dynamic case will be selected to register indexing.
    return true;

  Register Dst = MI.getOperand(0).getReg();
  Register Vec = MI.getOperand(1).getReg();
  Register Ins = MI.getOperand(2).getReg();

  LLT VecTy = MRI.getType(Vec);
  LLT EltTy = VecTy.getElementType();
  assert(EltTy == MRI.getType(Ins));

  B.setInstr(MI);

  if (IdxVal->Value < VecTy.getNumElements())
    B.buildInsert(Dst, Vec, Ins, IdxVal->Value * EltTy.getSizeInBits());
  else
    B.buildUndef(Dst);

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeShuffleVector(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  const LLT V2S16 = LLT::vector(2, 16);

  Register Dst = MI.getOperand(0).getReg();
  Register Src0 = MI.getOperand(1).getReg();
  LLT DstTy = MRI.getType(Dst);
  LLT SrcTy = MRI.getType(Src0);

  if (SrcTy == V2S16 && DstTy == V2S16 &&
      AMDGPU::isLegalVOP3PShuffleMask(MI.getOperand(3).getShuffleMask()))
    return true;

  MachineIRBuilder HelperBuilder(MI);
  GISelObserverWrapper DummyObserver;
  LegalizerHelper Helper(B.getMF(), DummyObserver, HelperBuilder);
  HelperBuilder.setInstr(MI);
  return Helper.lowerShuffleVector(MI) == LegalizerHelper::Legalized;
}

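// The amdgcn sin/cos intrinsics take an input pre-scaled by 1/(2*pi); on
// subtargets with a reduced trig input range the scaled value is additionally
// wrapped into [0, 1) with amdgcn_fract first.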
bool AMDGPULegalizerInfo::legalizeSinCos(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  B.setInstr(MI);

  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(DstReg);
  unsigned Flags = MI.getFlags();

  Register TrigVal;
  auto OneOver2Pi = B.buildFConstant(Ty, 0.5 / M_PI);
  if (ST.hasTrigReducedRange()) {
    auto MulVal = B.buildFMul(Ty, SrcReg, OneOver2Pi, Flags);
    TrigVal = B.buildIntrinsic(Intrinsic::amdgcn_fract, {Ty}, false)
      .addUse(MulVal.getReg(0))
      .setMIFlags(Flags).getReg(0);
  } else
    TrigVal = B.buildFMul(Ty, SrcReg, OneOver2Pi, Flags).getReg(0);

  Intrinsic::ID TrigIntrin = MI.getOpcode() == AMDGPU::G_FSIN ?
    Intrinsic::amdgcn_sin : Intrinsic::amdgcn_cos;
  B.buildIntrinsic(TrigIntrin, makeArrayRef<Register>(DstReg), false)
    .addUse(TrigVal)
    .setMIFlags(Flags);
  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::buildPCRelGlobalAddress(
  Register DstReg, LLT PtrTy,
  MachineIRBuilder &B, const GlobalValue *GV,
  unsigned Offset, unsigned GAFlags) const {
  // In order to support pc-relative addressing, SI_PC_ADD_REL_OFFSET is lowered
  // to the following code sequence:
  //
  // For constant address space:
  //   s_getpc_b64 s[0:1]
  //   s_add_u32 s0, s0, $symbol
  //   s_addc_u32 s1, s1, 0
  //
  //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
  //   a fixup or relocation is emitted to replace $symbol with a literal
  //   constant, which is a pc-relative offset from the encoding of the $symbol
  //   operand to the global variable.
  //
  // For global address space:
  //   s_getpc_b64 s[0:1]
  //   s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
  //   s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
  //
  //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
  //   fixups or relocations are emitted to replace $symbol@*@lo and
  //   $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
  //   which is a 64-bit pc-relative offset from the encoding of the $symbol
  //   operand to the global variable.
  //
  // What we want here is an offset from the value returned by s_getpc
  // (which is the address of the s_add_u32 instruction) to the global
  // variable, but since the encoding of $symbol starts 4 bytes after the start
  // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
  // small. This requires us to add 4 to the global variable offset in order to
  // compute the correct address.

  LLT ConstPtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);

  Register PCReg = PtrTy.getSizeInBits() != 32 ? DstReg :
    B.getMRI()->createGenericVirtualRegister(ConstPtrTy);

  MachineInstrBuilder MIB = B.buildInstr(AMDGPU::SI_PC_ADD_REL_OFFSET)
    .addDef(PCReg);

  MIB.addGlobalAddress(GV, Offset + 4, GAFlags);
  if (GAFlags == SIInstrInfo::MO_NONE)
    MIB.addImm(0);
  else
    MIB.addGlobalAddress(GV, Offset + 4, GAFlags + 1);

  B.getMRI()->setRegClass(PCReg, &AMDGPU::SReg_64RegClass);

  if (PtrTy.getSizeInBits() == 32)
    B.buildExtract(DstReg, PCReg, 0);
  return true;
}

bool AMDGPULegalizerInfo::legalizeGlobalValue(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  Register DstReg = MI.getOperand(0).getReg();
  LLT Ty = MRI.getType(DstReg);
  unsigned AS = Ty.getAddressSpace();

  const GlobalValue *GV = MI.getOperand(1).getGlobal();
  MachineFunction &MF = B.getMF();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  B.setInstr(MI);

  if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) {
    if (!MFI->isEntryFunction()) {
      const Function &Fn = MF.getFunction();
      DiagnosticInfoUnsupported BadLDSDecl(
        Fn, "local memory global used by non-kernel function", MI.getDebugLoc());
      Fn.getContext().diagnose(BadLDSDecl);
    }

    // TODO: We could emit code to handle the initialization somewhere.
    if (!AMDGPUTargetLowering::hasDefinedInitializer(GV)) {
      const SITargetLowering *TLI = ST.getTargetLowering();
      if (!TLI->shouldUseLDSConstAddress(GV)) {
        MI.getOperand(1).setTargetFlags(SIInstrInfo::MO_ABS32_LO);
        return true; // Leave in place;
      }

      B.buildConstant(DstReg, MFI->allocateLDSGlobal(B.getDataLayout(), *GV));
      MI.eraseFromParent();
      return true;
    }

    const Function &Fn = MF.getFunction();
    DiagnosticInfoUnsupported BadInit(
      Fn, "unsupported initializer for address space", MI.getDebugLoc());
    Fn.getContext().diagnose(BadInit);
    return true;
  }

  const SITargetLowering *TLI = ST.getTargetLowering();

  if (TLI->shouldEmitFixup(GV)) {
    buildPCRelGlobalAddress(DstReg, Ty, B, GV, 0);
    MI.eraseFromParent();
    return true;
  }

  if (TLI->shouldEmitPCReloc(GV)) {
    buildPCRelGlobalAddress(DstReg, Ty, B, GV, 0, SIInstrInfo::MO_REL32);
    MI.eraseFromParent();
    return true;
  }

  LLT PtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
  Register GOTAddr = MRI.createGenericVirtualRegister(PtrTy);

  MachineMemOperand *GOTMMO = MF.getMachineMemOperand(
    MachinePointerInfo::getGOT(MF),
    MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
    MachineMemOperand::MOInvariant,
    8 /*Size*/, 8 /*Align*/);

  buildPCRelGlobalAddress(GOTAddr, PtrTy, B, GV, 0, SIInstrInfo::MO_GOTPCREL32);

  if (Ty.getSizeInBits() == 32) {
    // Truncate if this is a 32-bit constant address.
    auto Load = B.buildLoad(PtrTy, GOTAddr, *GOTMMO);
    B.buildExtract(DstReg, Load, 0);
  } else
    B.buildLoad(DstReg, GOTAddr, *GOTMMO);

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeLoad(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B, GISelChangeObserver &Observer) const {
  B.setInstr(MI);
  LLT ConstPtr = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
  auto Cast = B.buildAddrSpaceCast(ConstPtr, MI.getOperand(1).getReg());
  Observer.changingInstr(MI);
  MI.getOperand(1).setReg(Cast.getReg(0));
  Observer.changedInstr(MI);
  return true;
}

bool AMDGPULegalizerInfo::legalizeFMad(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  LLT Ty = MRI.getType(MI.getOperand(0).getReg());
  assert(Ty.isScalar());

  MachineFunction &MF = B.getMF();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // TODO: Always legal with future ftz flag.
  // FIXME: Do we need just output?
  if (Ty == LLT::scalar(32) && !MFI->getMode().allFP32Denormals())
    return true;
  if (Ty == LLT::scalar(16) && !MFI->getMode().allFP64FP16Denormals())
    return true;

  MachineIRBuilder HelperBuilder(MI);
  GISelObserverWrapper DummyObserver;
  LegalizerHelper Helper(MF, DummyObserver, HelperBuilder);
  HelperBuilder.setMBB(*MI.getParent());
  return Helper.lowerFMad(MI) == LegalizerHelper::Legalized;
}

bool AMDGPULegalizerInfo::legalizeAtomicCmpXChg(
  MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B) const {
  Register DstReg = MI.getOperand(0).getReg();
  Register PtrReg = MI.getOperand(1).getReg();
  Register CmpVal = MI.getOperand(2).getReg();
  Register NewVal = MI.getOperand(3).getReg();

  assert(SITargetLowering::isFlatGlobalAddrSpace(
           MRI.getType(PtrReg).getAddressSpace()) &&
         "this should not have been custom lowered");

  LLT ValTy = MRI.getType(CmpVal);
  LLT VecTy = LLT::vector(2, ValTy);

  B.setInstr(MI);
  Register PackedVal = B.buildBuildVector(VecTy, { NewVal, CmpVal }).getReg(0);

  B.buildInstr(AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG)
    .addDef(DstReg)
    .addUse(PtrReg)
    .addUse(PackedVal)
    .setMemRefs(MI.memoperands());

  MI.eraseFromParent();
  return true;
}

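// The transcendental lowerings below reduce to the hardware log2/exp2
// operations: log_b(x) = log2(x) * (1 / log2(b)), exp(x) = exp2(x * log2(e)),
// and pow(x, y) = exp2(log2(x) * y) with the multiply done via
// amdgcn_fmul_legacy.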
bool AMDGPULegalizerInfo::legalizeFlog(
  MachineInstr &MI, MachineIRBuilder &B, double Log2BaseInverted) const {
  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();
  LLT Ty = B.getMRI()->getType(Dst);
  unsigned Flags = MI.getFlags();
  B.setInstr(MI);

  auto Log2Operand = B.buildFLog2(Ty, Src, Flags);
  auto Log2BaseInvertedOperand = B.buildFConstant(Ty, Log2BaseInverted);

  B.buildFMul(Dst, Log2Operand, Log2BaseInvertedOperand, Flags);
  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeFExp(MachineInstr &MI,
                                       MachineIRBuilder &B) const {
  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();
  unsigned Flags = MI.getFlags();
  LLT Ty = B.getMRI()->getType(Dst);
  B.setInstr(MI);

  auto K = B.buildFConstant(Ty, numbers::log2e);
  auto Mul = B.buildFMul(Ty, Src, K, Flags);
  B.buildFExp2(Dst, Mul, Flags);
  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeFPow(MachineInstr &MI,
                                       MachineIRBuilder &B) const {
  Register Dst = MI.getOperand(0).getReg();
  Register Src0 = MI.getOperand(1).getReg();
  Register Src1 = MI.getOperand(2).getReg();
  unsigned Flags = MI.getFlags();
  LLT Ty = B.getMRI()->getType(Dst);
  B.setInstr(MI);
  const LLT S16 = LLT::scalar(16);
  const LLT S32 = LLT::scalar(32);

  if (Ty == S32) {
    auto Log = B.buildFLog2(S32, Src0, Flags);
    auto Mul = B.buildIntrinsic(Intrinsic::amdgcn_fmul_legacy, {S32}, false)
      .addUse(Log.getReg(0))
      .addUse(Src1)
      .setMIFlags(Flags);
    B.buildFExp2(Dst, Mul, Flags);
  } else if (Ty == S16) {
    // There's no f16 fmul_legacy, so we need to convert for it.
    auto Log = B.buildFLog2(S16, Src0, Flags);
    auto Ext0 = B.buildFPExt(S32, Log, Flags);
    auto Ext1 = B.buildFPExt(S32, Src1, Flags);
    auto Mul = B.buildIntrinsic(Intrinsic::amdgcn_fmul_legacy, {S32}, false)
      .addUse(Ext0.getReg(0))
      .addUse(Ext1.getReg(0))
      .setMIFlags(Flags);

    B.buildFExp2(Dst, B.buildFPTrunc(S16, Mul), Flags);
  } else
    return false;

  MI.eraseFromParent();
  return true;
}

// Find a source register, ignoring any possible source modifiers.
static Register stripAnySourceMods(Register OrigSrc, MachineRegisterInfo &MRI) {
  Register ModSrc = OrigSrc;
  if (MachineInstr *SrcFNeg = getOpcodeDef(AMDGPU::G_FNEG, ModSrc, MRI)) {
    ModSrc = SrcFNeg->getOperand(1).getReg();
    if (MachineInstr *SrcFAbs = getOpcodeDef(AMDGPU::G_FABS, ModSrc, MRI))
      ModSrc = SrcFAbs->getOperand(1).getReg();
  } else if (MachineInstr *SrcFAbs = getOpcodeDef(AMDGPU::G_FABS, ModSrc, MRI))
    ModSrc = SrcFAbs->getOperand(1).getReg();
  return ModSrc;
}

bool AMDGPULegalizerInfo::legalizeFFloor(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         MachineIRBuilder &B) const {
  B.setInstr(MI);

  const LLT S1 = LLT::scalar(1);
  const LLT S64 = LLT::scalar(64);
  Register Dst = MI.getOperand(0).getReg();
  Register OrigSrc = MI.getOperand(1).getReg();
  unsigned Flags = MI.getFlags();
  assert(ST.hasFractBug() && MRI.getType(Dst) == S64 &&
         "this should not have been custom lowered");

  // V_FRACT is buggy on SI, so the F32 version is never used and (x-floor(x))
  // is used instead. However, SI doesn't have V_FLOOR_F64, so the most
  // efficient way to implement it is using V_FRACT_F64. The workaround for the
  // V_FRACT bug is:
  //   fract(x) = isnan(x) ? x : min(V_FRACT(x), 0.99999999999999999)
  //
  // Convert floor(x) to (x - fract(x))

  auto Fract = B.buildIntrinsic(Intrinsic::amdgcn_fract, {S64}, false)
    .addUse(OrigSrc)
    .setMIFlags(Flags);

  // Give source modifier matching some assistance before obscuring a foldable
  // pattern.

  // TODO: We can avoid the neg on the fract? The input sign to fract
  // shouldn't matter?
  Register ModSrc = stripAnySourceMods(OrigSrc, MRI);

  auto Const = B.buildFConstant(S64, BitsToDouble(0x3fefffffffffffff));

  Register Min = MRI.createGenericVirtualRegister(S64);

  // We don't need to concern ourselves with the snan handling difference, so
  // use the one which will directly select.
  const SIMachineFunctionInfo *MFI = B.getMF().getInfo<SIMachineFunctionInfo>();
  if (MFI->getMode().IEEE)
    B.buildFMinNumIEEE(Min, Fract, Const, Flags);
  else
    B.buildFMinNum(Min, Fract, Const, Flags);

  Register CorrectedFract = Min;
  if (!MI.getFlag(MachineInstr::FmNoNans)) {
    auto IsNan = B.buildFCmp(CmpInst::FCMP_UNO, S1, ModSrc, ModSrc, Flags);
    CorrectedFract = B.buildSelect(S64, IsNan, ModSrc, Min, Flags).getReg(0);
  }

  auto NegFract = B.buildFNeg(S64, CorrectedFract, Flags);
  B.buildFAdd(Dst, OrigSrc, NegFract, Flags);

  MI.eraseFromParent();
  return true;
}

// Turn an illegal packed v2s16 build vector into bit operations.
// TODO: This should probably be a bitcast action in LegalizerHelper.
bool AMDGPULegalizerInfo::legalizeBuildVector(
  MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B) const {
  Register Dst = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(Dst);
  const LLT S32 = LLT::scalar(32);
  const LLT V2S16 = LLT::vector(2, 16);
  (void)DstTy;
  (void)V2S16;
  assert(DstTy == V2S16);

  Register Src0 = MI.getOperand(1).getReg();
  Register Src1 = MI.getOperand(2).getReg();
  assert(MRI.getType(Src0) == LLT::scalar(16));

  B.setInstr(MI);
  auto Merge = B.buildMerge(S32, {Src0, Src1});
  B.buildBitcast(Dst, Merge);

  MI.eraseFromParent();
  return true;
}

// Return the use branch instruction, otherwise null if the usage is invalid.
static MachineInstr *verifyCFIntrinsic(MachineInstr &MI,
                                       MachineRegisterInfo &MRI,
                                       MachineInstr *&Br) {
  Register CondDef = MI.getOperand(0).getReg();
  if (!MRI.hasOneNonDBGUse(CondDef))
    return nullptr;

  MachineInstr &UseMI = *MRI.use_instr_nodbg_begin(CondDef);
  if (UseMI.getParent() != MI.getParent() ||
      UseMI.getOpcode() != AMDGPU::G_BRCOND)
    return nullptr;

  // Make sure the cond br is followed by a G_BR
  MachineBasicBlock::iterator Next = std::next(UseMI.getIterator());
  if (Next != MI.getParent()->end()) {
    if (Next->getOpcode() != AMDGPU::G_BR)
      return nullptr;
    Br = &*Next;
  }

  return &UseMI;
}

Register AMDGPULegalizerInfo::insertLiveInCopy(MachineIRBuilder &B,
                                               MachineRegisterInfo &MRI,
                                               Register LiveIn,
                                               Register PhyReg) const {
  assert(PhyReg.isPhysical() && "Physical register expected");

  // Insert the live-in copy, if required, by defining destination virtual
  // register.
  // FIXME: It seems EmitLiveInCopies isn't called anywhere?
  if (!MRI.getVRegDef(LiveIn)) {
    // FIXME: Should have scoped insert pt
    MachineBasicBlock &OrigInsBB = B.getMBB();
    auto OrigInsPt = B.getInsertPt();

    MachineBasicBlock &EntryMBB = B.getMF().front();
    EntryMBB.addLiveIn(PhyReg);
    B.setInsertPt(EntryMBB, EntryMBB.begin());
    B.buildCopy(LiveIn, PhyReg);

    B.setInsertPt(OrigInsBB, OrigInsPt);
  }

  return LiveIn;
}

Register AMDGPULegalizerInfo::getLiveInRegister(MachineIRBuilder &B,
                                                MachineRegisterInfo &MRI,
                                                Register PhyReg, LLT Ty,
                                                bool InsertLiveInCopy) const {
  assert(PhyReg.isPhysical() && "Physical register expected");

  // Get or create the virtual live-in register.
  Register LiveIn = MRI.getLiveInVirtReg(PhyReg);
  if (!LiveIn) {
    LiveIn = MRI.createGenericVirtualRegister(Ty);
    MRI.addLiveIn(PhyReg, LiveIn);
  }

  // When the actual true copy required is from virtual register to physical
  // register (to be inserted later), live-in copy insertion from physical
  // register to virtual register is not required.
  if (!InsertLiveInCopy)
    return LiveIn;

  return insertLiveInCopy(B, MRI, LiveIn, PhyReg);
}

const ArgDescriptor *AMDGPULegalizerInfo::getArgDescriptor(
  MachineIRBuilder &B, AMDGPUFunctionArgInfo::PreloadedValue ArgType) const {
  const SIMachineFunctionInfo *MFI = B.getMF().getInfo<SIMachineFunctionInfo>();
  const ArgDescriptor *Arg;
  const TargetRegisterClass *RC;
  std::tie(Arg, RC) = MFI->getPreloadedValue(ArgType);
  if (!Arg) {
    LLVM_DEBUG(dbgs() << "Required arg register missing\n");
    return nullptr;
  }
  return Arg;
}

bool AMDGPULegalizerInfo::loadInputValue(Register DstReg, MachineIRBuilder &B,
                                         const ArgDescriptor *Arg) const {
  if (!Arg->isRegister() || !Arg->getRegister().isValid())
    return false; // TODO: Handle these

  Register SrcReg = Arg->getRegister();
  assert(SrcReg.isPhysical() && "Physical register expected");
  assert(DstReg.isVirtual() && "Virtual register expected");

  MachineRegisterInfo &MRI = *B.getMRI();

  LLT Ty = MRI.getType(DstReg);
  Register LiveIn = getLiveInRegister(B, MRI, SrcReg, Ty);

  if (Arg->isMasked()) {
    // TODO: Should we try to emit this once in the entry block?
    const LLT S32 = LLT::scalar(32);
    const unsigned Mask = Arg->getMask();
    const unsigned Shift = countTrailingZeros<unsigned>(Mask);

    Register AndMaskSrc = LiveIn;

    if (Shift != 0) {
      auto ShiftAmt = B.buildConstant(S32, Shift);
      AndMaskSrc = B.buildLShr(S32, LiveIn, ShiftAmt).getReg(0);
    }

    B.buildAnd(DstReg, AndMaskSrc, B.buildConstant(S32, Mask >> Shift));
  } else {
    B.buildCopy(DstReg, LiveIn);
  }

  return true;
}

bool AMDGPULegalizerInfo::legalizePreloadedArgIntrin(
  MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B,
  AMDGPUFunctionArgInfo::PreloadedValue ArgType) const {
  B.setInstr(MI);

  const ArgDescriptor *Arg = getArgDescriptor(B, ArgType);
  if (!Arg)
    return false;

  if (!loadInputValue(MI.getOperand(0).getReg(), B, Arg))
    return false;

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeFDIV(MachineInstr &MI,
                                       MachineRegisterInfo &MRI,
                                       MachineIRBuilder &B) const {
  B.setInstr(MI);
  Register Dst = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(Dst);
  LLT S16 = LLT::scalar(16);
  LLT S32 = LLT::scalar(32);
  LLT S64 = LLT::scalar(64);

  if (legalizeFastUnsafeFDIV(MI, MRI, B))
    return true;

  if (DstTy == S16)
    return legalizeFDIV16(MI, MRI, B);
  if (DstTy == S32)
    return legalizeFDIV32(MI, MRI, B);
  if (DstTy == S64)
    return legalizeFDIV64(MI, MRI, B);

  return false;
}

static Register buildDivRCP(MachineIRBuilder &B, Register Src) {
  const LLT S32 = LLT::scalar(32);

  auto Cvt0 = B.buildUITOFP(S32, Src);
  auto RcpIFlag = B.buildInstr(AMDGPU::G_AMDGPU_RCP_IFLAG, {S32}, {Cvt0});
  auto FPUIntMaxPlus1 = B.buildFConstant(S32, BitsToFloat(0x4f800000));
  auto Mul = B.buildFMul(S32, RcpIFlag, FPUIntMaxPlus1);
  return B.buildFPTOUI(S32, Mul).getReg(0);
}

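// The 32-bit unsigned division below follows the classic reciprocal scheme:
// buildDivRCP above produces an estimate of 2^32 / Den (the f32 reciprocal
// scaled by 0x4f800000 == 2^32 as a float), the estimate's rounding error is
// measured and folded back in, and the quotient/remainder are then corrected
// by at most one step in either direction.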
void AMDGPULegalizerInfo::legalizeUDIV_UREM32Impl(MachineIRBuilder &B,
                                                  Register DstReg,
                                                  Register Num,
                                                  Register Den,
                                                  bool IsRem) const {
  const LLT S1 = LLT::scalar(1);
  const LLT S32 = LLT::scalar(32);

  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is rounding error.
  auto RCP = buildDivRCP(B, Den);

  // RCP_LO = mul(RCP, Den)
  auto RCP_LO = B.buildMul(S32, RCP, Den);

  // RCP_HI = mulhu(RCP, Den)
  auto RCP_HI = B.buildUMulH(S32, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  auto Zero = B.buildConstant(S32, 0);
  auto NEG_RCP_LO = B.buildSub(S32, Zero, RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  auto CmpRcpHiZero = B.buildICmp(CmpInst::ICMP_EQ, S1, RCP_HI, Zero);
  auto ABS_RCP_LO = B.buildSelect(S32, CmpRcpHiZero, NEG_RCP_LO, RCP_LO);

  // Calculate the rounding error from the URECIP instruction
  // E = mulhu(ABS_RCP_LO, RCP)
  auto E = B.buildUMulH(S32, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  auto RCP_A_E = B.buildAdd(S32, RCP, E);

  // RCP_S_E = RCP - E
  auto RCP_S_E = B.buildSub(S32, RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_SUB_E)
  auto Tmp0 = B.buildSelect(S32, CmpRcpHiZero, RCP_A_E, RCP_S_E);

  // Quotient = mulhu(Tmp0, Num)
  auto Quotient = B.buildUMulH(S32, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  auto Num_S_Remainder = B.buildMul(S32, Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  auto Remainder = B.buildSub(S32, Num, Num_S_Remainder);

  // Remainder_GE_Den = Remainder >= Den
  auto Remainder_GE_Den = B.buildICmp(CmpInst::ICMP_UGE, S1, Remainder, Den);

  // Remainder_GE_Zero = Num >= Num_S_Remainder;
  auto Remainder_GE_Zero = B.buildICmp(CmpInst::ICMP_UGE, S1,
                                       Num, Num_S_Remainder);

  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
  auto Tmp1 = B.buildAnd(S1, Remainder_GE_Den, Remainder_GE_Zero);

  // Calculate Division result:

  // Quotient_A_One = Quotient + 1
  auto One = B.buildConstant(S32, 1);
  auto Quotient_A_One = B.buildAdd(S32, Quotient, One);

  // Quotient_S_One = Quotient - 1
  auto Quotient_S_One = B.buildSub(S32, Quotient, One);

  // Div = (Tmp1 == 0 ? Quotient_A_One : Quotient)
  auto Div = B.buildSelect(S32, Tmp1, Quotient, Quotient_A_One);

  // Div = (Remainder_GE_Zero ? Div : Quotient_S_One)
  if (IsRem) {
    Div = B.buildSelect(S32, Remainder_GE_Zero, Div, Quotient_S_One);

    // Calculate Rem result:
    auto Remainder_S_Den = B.buildSub(S32, Remainder, Den);

    // Remainder_A_Den = Remainder + Den
    auto Remainder_A_Den = B.buildAdd(S32, Remainder, Den);

    // Rem = (Tmp1 ? Remainder_S_Den : Remainder)
    auto Rem = B.buildSelect(S32, Tmp1, Remainder_S_Den, Remainder);

    // Rem = (Remainder_GE_Zero ? Rem : Remainder_A_Den)
    B.buildSelect(DstReg, Remainder_GE_Zero, Rem, Remainder_A_Den);
  } else {
    B.buildSelect(DstReg, Remainder_GE_Zero, Div, Quotient_S_One);
  }
}

bool AMDGPULegalizerInfo::legalizeUDIV_UREM32(MachineInstr &MI,
                                              MachineRegisterInfo &MRI,
                                              MachineIRBuilder &B) const {
  B.setInstr(MI);
  const bool IsRem = MI.getOpcode() == AMDGPU::G_UREM;
  Register DstReg = MI.getOperand(0).getReg();
  Register Num = MI.getOperand(1).getReg();
  Register Den = MI.getOperand(2).getReg();
  legalizeUDIV_UREM32Impl(B, DstReg, Num, Den, IsRem);
  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeUDIV_UREM(MachineInstr &MI,
                                            MachineRegisterInfo &MRI,
                                            MachineIRBuilder &B) const {
  if (MRI.getType(MI.getOperand(0).getReg()) == LLT::scalar(32))
    return legalizeUDIV_UREM32(MI, MRI, B);
  return false;
}

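// Signed 32-bit division reuses the unsigned expansion: each operand is
// replaced by its absolute value via (x + sign) ^ sign, where sign is x >> 31,
// and the sign of the result is then restored (LHSign ^ RHSign for the
// quotient, LHSign for the remainder) with the same xor/subtract trick.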
bool AMDGPULegalizerInfo::legalizeSDIV_SREM32(MachineInstr &MI,
                                              MachineRegisterInfo &MRI,
                                              MachineIRBuilder &B) const {
  B.setInstr(MI);
  const LLT S32 = LLT::scalar(32);

  const bool IsRem = MI.getOpcode() == AMDGPU::G_SREM;
  Register DstReg = MI.getOperand(0).getReg();
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();

  auto ThirtyOne = B.buildConstant(S32, 31);
  auto LHSign = B.buildAShr(S32, LHS, ThirtyOne);
  auto RHSign = B.buildAShr(S32, RHS, ThirtyOne);

  LHS = B.buildAdd(S32, LHS, LHSign).getReg(0);
  RHS = B.buildAdd(S32, RHS, RHSign).getReg(0);

  LHS = B.buildXor(S32, LHS, LHSign).getReg(0);
  RHS = B.buildXor(S32, RHS, RHSign).getReg(0);

  Register UDivRem = MRI.createGenericVirtualRegister(S32);
  legalizeUDIV_UREM32Impl(B, UDivRem, LHS, RHS, IsRem);

  if (IsRem) {
    auto RSign = LHSign; // Remainder sign is the same as LHS
    UDivRem = B.buildXor(S32, UDivRem, RSign).getReg(0);
    B.buildSub(DstReg, UDivRem, RSign);
  } else {
    auto DSign = B.buildXor(S32, LHSign, RHSign);
    UDivRem = B.buildXor(S32, UDivRem, DSign).getReg(0);
    B.buildSub(DstReg, UDivRem, DSign);
  }

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeSDIV_SREM(MachineInstr &MI,
                                            MachineRegisterInfo &MRI,
                                            MachineIRBuilder &B) const {
  if (MRI.getType(MI.getOperand(0).getReg()) == LLT::scalar(32))
    return legalizeSDIV_SREM32(MI, MRI, B);
  return false;
}

bool AMDGPULegalizerInfo::legalizeFastUnsafeFDIV(MachineInstr &MI,
                                                 MachineRegisterInfo &MRI,
                                                 MachineIRBuilder &B) const {
  Register Res = MI.getOperand(0).getReg();
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();

  uint16_t Flags = MI.getFlags();

  LLT ResTy = MRI.getType(Res);
  LLT S32 = LLT::scalar(32);
  LLT S64 = LLT::scalar(64);

  const MachineFunction &MF = B.getMF();
  bool Unsafe =
    MF.getTarget().Options.UnsafeFPMath || MI.getFlag(MachineInstr::FmArcp);

  if (!MF.getTarget().Options.UnsafeFPMath && ResTy == S64)
    return false;

  if (!Unsafe && ResTy == S32 &&
      MF.getInfo<SIMachineFunctionInfo>()->getMode().allFP32Denormals())
    return false;

  if (auto CLHS = getConstantFPVRegVal(LHS, MRI)) {
    // 1 / x -> RCP(x)
    if (CLHS->isExactlyValue(1.0)) {
      B.buildIntrinsic(Intrinsic::amdgcn_rcp, Res, false)
        .addUse(RHS)
        .setMIFlags(Flags);

      MI.eraseFromParent();
      return true;
    }

    // -1 / x -> RCP( FNEG(x) )
    if (CLHS->isExactlyValue(-1.0)) {
      auto FNeg = B.buildFNeg(ResTy, RHS, Flags);
      B.buildIntrinsic(Intrinsic::amdgcn_rcp, Res, false)
        .addUse(FNeg.getReg(0))
        .setMIFlags(Flags);

      MI.eraseFromParent();
      return true;
    }
  }

  // x / y -> x * (1.0 / y)
  if (Unsafe) {
    auto RCP = B.buildIntrinsic(Intrinsic::amdgcn_rcp, {ResTy}, false)
      .addUse(RHS)
      .setMIFlags(Flags);
    B.buildFMul(Res, LHS, RCP, Flags);

    MI.eraseFromParent();
    return true;
  }

  return false;
}

bool AMDGPULegalizerInfo::legalizeFDIV16(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         MachineIRBuilder &B) const {
  B.setInstr(MI);
  Register Res = MI.getOperand(0).getReg();
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();

  uint16_t Flags = MI.getFlags();

  LLT S16 = LLT::scalar(16);
  LLT S32 = LLT::scalar(32);

  auto LHSExt = B.buildFPExt(S32, LHS, Flags);
  auto RHSExt = B.buildFPExt(S32, RHS, Flags);

  auto RCP = B.buildIntrinsic(Intrinsic::amdgcn_rcp, {S32}, false)
    .addUse(RHSExt.getReg(0))
    .setMIFlags(Flags);

  auto QUOT = B.buildFMul(S32, LHSExt, RCP, Flags);
  auto RDst = B.buildFPTrunc(S16, QUOT, Flags);

  B.buildIntrinsic(Intrinsic::amdgcn_div_fixup, Res, false)
    .addUse(RDst.getReg(0))
    .addUse(RHS)
    .addUse(LHS)
    .setMIFlags(Flags);

  MI.eraseFromParent();
  return true;
}

// Enable or disable FP32 denorm mode. When 'Enable' is true, emit instructions
// to enable denorm mode. When 'Enable' is false, disable denorm mode.
static void toggleSPDenormMode(bool Enable,
                               MachineIRBuilder &B,
                               const GCNSubtarget &ST,
                               AMDGPU::SIModeRegisterDefaults Mode) {
  // Set SP denorm mode to this value.
  unsigned SPDenormMode =
    Enable ? FP_DENORM_FLUSH_NONE : Mode.fpDenormModeSPValue();

  if (ST.hasDenormModeInst()) {
    // Preserve default FP64FP16 denorm mode while updating FP32 mode.
    uint32_t DPDenormModeDefault = Mode.fpDenormModeDPValue();

    uint32_t NewDenormModeValue = SPDenormMode | (DPDenormModeDefault << 2);
    B.buildInstr(AMDGPU::S_DENORM_MODE)
      .addImm(NewDenormModeValue);

  } else {
    // Select FP32 bit field in mode register.
    unsigned SPDenormModeBitField = AMDGPU::Hwreg::ID_MODE |
                                    (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
                                    (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);

    B.buildInstr(AMDGPU::S_SETREG_IMM32_B32)
      .addImm(SPDenormMode)
      .addImm(SPDenormModeBitField);
  }
}

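// f32 division below uses the div_scale / div_fmas / div_fixup sequence: the
// operands are range-scaled, an initial amdgcn_rcp estimate is refined by a
// chain of FMAs (Newton-Raphson style), and div_fixup produces the final
// result. FP32 denormals are temporarily enabled around the FMA chain when
// the current mode flushes them.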
bool AMDGPULegalizerInfo::legalizeFDIV32(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         MachineIRBuilder &B) const {
  B.setInstr(MI);
  Register Res = MI.getOperand(0).getReg();
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();
  const SIMachineFunctionInfo *MFI = B.getMF().getInfo<SIMachineFunctionInfo>();
  AMDGPU::SIModeRegisterDefaults Mode = MFI->getMode();

  uint16_t Flags = MI.getFlags();

  LLT S32 = LLT::scalar(32);
  LLT S1 = LLT::scalar(1);

  auto One = B.buildFConstant(S32, 1.0f);

  auto DenominatorScaled =
    B.buildIntrinsic(Intrinsic::amdgcn_div_scale, {S32, S1}, false)
      .addUse(RHS)
      .addUse(LHS)
      .addImm(1)
      .setMIFlags(Flags);
  auto NumeratorScaled =
    B.buildIntrinsic(Intrinsic::amdgcn_div_scale, {S32, S1}, false)
      .addUse(LHS)
      .addUse(RHS)
      .addImm(0)
      .setMIFlags(Flags);

  auto ApproxRcp = B.buildIntrinsic(Intrinsic::amdgcn_rcp, {S32}, false)
    .addUse(DenominatorScaled.getReg(0))
    .setMIFlags(Flags);
  auto NegDivScale0 = B.buildFNeg(S32, DenominatorScaled, Flags);

  // FIXME: Doesn't correctly model the FP mode switch, and the FP operations
  // aren't modeled as reading it.
  if (!Mode.allFP32Denormals())
    toggleSPDenormMode(true, B, ST, Mode);

  auto Fma0 = B.buildFMA(S32, NegDivScale0, ApproxRcp, One, Flags);
  auto Fma1 = B.buildFMA(S32, Fma0, ApproxRcp, ApproxRcp, Flags);
  auto Mul = B.buildFMul(S32, NumeratorScaled, Fma1, Flags);
  auto Fma2 = B.buildFMA(S32, NegDivScale0, Mul, NumeratorScaled, Flags);
  auto Fma3 = B.buildFMA(S32, Fma2, Fma1, Mul, Flags);
  auto Fma4 = B.buildFMA(S32, NegDivScale0, Fma3, NumeratorScaled, Flags);

  if (!Mode.allFP32Denormals())
    toggleSPDenormMode(false, B, ST, Mode);

  auto Fmas = B.buildIntrinsic(Intrinsic::amdgcn_div_fmas, {S32}, false)
    .addUse(Fma4.getReg(0))
    .addUse(Fma1.getReg(0))
    .addUse(Fma3.getReg(0))
    .addUse(NumeratorScaled.getReg(1))
    .setMIFlags(Flags);

  B.buildIntrinsic(Intrinsic::amdgcn_div_fixup, Res, false)
    .addUse(Fmas.getReg(0))
    .addUse(RHS)
    .addUse(LHS)
    .setMIFlags(Flags);

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeFDIV64(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         MachineIRBuilder &B) const {
  B.setInstr(MI);
  Register Res = MI.getOperand(0).getReg();
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();

  uint16_t Flags = MI.getFlags();

  LLT S64 = LLT::scalar(64);
  LLT S1 = LLT::scalar(1);

  auto One = B.buildFConstant(S64, 1.0);

  auto DivScale0 = B.buildIntrinsic(Intrinsic::amdgcn_div_scale, {S64, S1}, false)
    .addUse(LHS)
    .addUse(RHS)
    .addImm(1)
    .setMIFlags(Flags);

  auto NegDivScale0 = B.buildFNeg(S64, DivScale0.getReg(0), Flags);

  auto Rcp = B.buildIntrinsic(Intrinsic::amdgcn_rcp, {S64}, false)
    .addUse(DivScale0.getReg(0))
    .setMIFlags(Flags);

  auto Fma0 = B.buildFMA(S64, NegDivScale0, Rcp, One, Flags);
  auto Fma1 = B.buildFMA(S64, Rcp, Fma0, Rcp, Flags);
  auto Fma2 = B.buildFMA(S64, NegDivScale0, Fma1, One, Flags);

  auto DivScale1 = B.buildIntrinsic(Intrinsic::amdgcn_div_scale, {S64, S1}, false)
    .addUse(LHS)
    .addUse(RHS)
    .addImm(0)
    .setMIFlags(Flags);

  auto Fma3 = B.buildFMA(S64, Fma1, Fma2, Fma1, Flags);
  auto Mul = B.buildFMul(S64, DivScale1.getReg(0), Fma3, Flags);
  auto Fma4 = B.buildFMA(S64, NegDivScale0, Mul, DivScale1.getReg(0), Flags);

  Register Scale;
  if (!ST.hasUsableDivScaleConditionOutput()) {
    // Workaround a hardware bug on SI where the condition output from div_scale
    // is not usable.

    LLT S32 = LLT::scalar(32);

    auto NumUnmerge = B.buildUnmerge(S32, LHS);
    auto DenUnmerge = B.buildUnmerge(S32, RHS);
    auto Scale0Unmerge = B.buildUnmerge(S32, DivScale0);
    auto Scale1Unmerge = B.buildUnmerge(S32, DivScale1);

    auto CmpNum = B.buildICmp(ICmpInst::ICMP_EQ, S1, NumUnmerge.getReg(1),
                              Scale1Unmerge.getReg(1));
    auto CmpDen = B.buildICmp(ICmpInst::ICMP_EQ, S1, DenUnmerge.getReg(1),
                              Scale0Unmerge.getReg(1));
    Scale = B.buildXor(S1, CmpNum, CmpDen).getReg(0);
  } else {
    Scale = DivScale1.getReg(1);
  }

  auto Fmas = B.buildIntrinsic(Intrinsic::amdgcn_div_fmas, {S64}, false)
    .addUse(Fma4.getReg(0))
    .addUse(Fma3.getReg(0))
    .addUse(Mul.getReg(0))
    .addUse(Scale)
    .setMIFlags(Flags);

  B.buildIntrinsic(Intrinsic::amdgcn_div_fixup, makeArrayRef(Res), false)
    .addUse(Fmas.getReg(0))
    .addUse(RHS)
    .addUse(LHS)
    .setMIFlags(Flags);

  MI.eraseFromParent();
  return true;
}

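// The fdiv_fast intrinsic expansion below guards against overflow in the
// reciprocal: when |RHS| exceeds 0x6f800000 (2^96) the denominator is
// pre-scaled by 0x2f800000 (2^-32) before taking amdgcn_rcp, and the same
// scale factor is reapplied to the quotient afterwards.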
bool AMDGPULegalizerInfo::legalizeFDIVFastIntrin(MachineInstr &MI,
                                                 MachineRegisterInfo &MRI,
                                                 MachineIRBuilder &B) const {
  B.setInstr(MI);
  Register Res = MI.getOperand(0).getReg();
  Register LHS = MI.getOperand(2).getReg();
  Register RHS = MI.getOperand(3).getReg();
  uint16_t Flags = MI.getFlags();

  LLT S32 = LLT::scalar(32);
  LLT S1 = LLT::scalar(1);

  auto Abs = B.buildFAbs(S32, RHS, Flags);
  const APFloat C0Val(1.0f);

  auto C0 = B.buildConstant(S32, 0x6f800000);
  auto C1 = B.buildConstant(S32, 0x2f800000);
  auto C2 = B.buildConstant(S32, FloatToBits(1.0f));

  auto CmpRes = B.buildFCmp(CmpInst::FCMP_OGT, S1, Abs, C0, Flags);
  auto Sel = B.buildSelect(S32, CmpRes, C1, C2, Flags);

  auto Mul0 = B.buildFMul(S32, RHS, Sel, Flags);

  auto RCP = B.buildIntrinsic(Intrinsic::amdgcn_rcp, {S32}, false)
    .addUse(Mul0.getReg(0))
    .setMIFlags(Flags);

  auto Mul1 = B.buildFMul(S32, LHS, RCP, Flags);

  B.buildFMul(Res, Sel, Mul1, Flags);

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeImplicitArgPtr(MachineInstr &MI,
                                                 MachineRegisterInfo &MRI,
                                                 MachineIRBuilder &B) const {
  const SIMachineFunctionInfo *MFI = B.getMF().getInfo<SIMachineFunctionInfo>();
  if (!MFI->isEntryFunction()) {
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
  }

  B.setInstr(MI);

  uint64_t Offset =
    ST.getTargetLowering()->getImplicitParameterOffset(
      B.getMF(), AMDGPUTargetLowering::FIRST_IMPLICIT);
  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(DstReg);
  LLT IdxTy = LLT::scalar(DstTy.getSizeInBits());

  const ArgDescriptor *Arg;
  const TargetRegisterClass *RC;
  std::tie(Arg, RC)
    = MFI->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  if (!Arg)
    return false;

  Register KernargPtrReg = MRI.createGenericVirtualRegister(DstTy);
  if (!loadInputValue(KernargPtrReg, B, Arg))
    return false;

  B.buildPtrAdd(DstReg, KernargPtrReg, B.buildConstant(IdxTy, Offset).getReg(0));
  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeIsAddrSpace(MachineInstr &MI,
                                              MachineRegisterInfo &MRI,
                                              MachineIRBuilder &B,
                                              unsigned AddrSpace) const {
  B.setInstr(MI);
  Register ApertureReg = getSegmentAperture(AddrSpace, MRI, B);
  auto Hi32 = B.buildExtract(LLT::scalar(32), MI.getOperand(2).getReg(), 32);
  B.buildICmp(ICmpInst::ICMP_EQ, MI.getOperand(0), Hi32, ApertureReg);
  MI.eraseFromParent();
  return true;
}

// The raw.(t)buffer and struct.(t)buffer intrinsics have two offset args:
// offset (the offset that is included in bounds checking and swizzling, to be
// split between the instruction's voffset and immoffset fields) and soffset
// (the offset that is excluded from bounds checking and swizzling, to go in
// the instruction's soffset field). This function takes the first kind of
// offset and figures out how to split it between voffset and immoffset.
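// For example, an incoming constant offset of 4200 is split into
// Overflow = 4200 & ~4095 = 4096 (added to the voffset register) and
// ImmOffset = 104 (kept in the immoffset field); the unsplit total (4200) is
// also returned so callers can offset the memory operand.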
std::tuple<Register, unsigned, unsigned>
AMDGPULegalizerInfo::splitBufferOffsets(MachineIRBuilder &B,
                                        Register OrigOffset) const {
  const unsigned MaxImm = 4095;
  Register BaseReg;
  unsigned TotalConstOffset;
  MachineInstr *OffsetDef;
  const LLT S32 = LLT::scalar(32);

  std::tie(BaseReg, TotalConstOffset, OffsetDef)
    = AMDGPU::getBaseWithConstantOffset(*B.getMRI(), OrigOffset);

  unsigned ImmOffset = TotalConstOffset;

  // If the immediate value is too big for the immoffset field, put the value
  // and -4096 into the immoffset field so that the value that is copied/added
  // for the voffset field is a multiple of 4096, and it stands more chance
  // of being CSEd with the copy/add for another similar load/store.
  // However, do not do that rounding down to a multiple of 4096 if that is a
  // negative number, as it appears to be illegal to have a negative offset
  // in the vgpr, even if adding the immediate offset makes it positive.
  unsigned Overflow = ImmOffset & ~MaxImm;
  ImmOffset -= Overflow;
  if ((int32_t)Overflow < 0) {
    Overflow += ImmOffset;
    ImmOffset = 0;
  }

  if (Overflow != 0) {
    if (!BaseReg) {
      BaseReg = B.buildConstant(S32, Overflow).getReg(0);
    } else {
      auto OverflowVal = B.buildConstant(S32, Overflow);
      BaseReg = B.buildAdd(S32, BaseReg, OverflowVal).getReg(0);
    }
  }

  if (!BaseReg)
    BaseReg = B.buildConstant(S32, 0).getReg(0);

  return std::make_tuple(BaseReg, ImmOffset, TotalConstOffset);
}

/// Handle register layout difference for f16 images for some subtargets.
Register AMDGPULegalizerInfo::handleD16VData(MachineIRBuilder &B,
                                             MachineRegisterInfo &MRI,
                                             Register Reg) const {
  if (!ST.hasUnpackedD16VMem())
    return Reg;

  const LLT S16 = LLT::scalar(16);
  const LLT S32 = LLT::scalar(32);
  LLT StoreVT = MRI.getType(Reg);
  assert(StoreVT.isVector() && StoreVT.getElementType() == S16);

  auto Unmerge = B.buildUnmerge(S16, Reg);

  SmallVector<Register, 4> WideRegs;
  for (int I = 0, E = Unmerge->getNumOperands() - 1; I != E; ++I)
    WideRegs.push_back(B.buildAnyExt(S32, Unmerge.getReg(I)).getReg(0));

  int NumElts = StoreVT.getNumElements();

  return B.buildBuildVector(LLT::vector(NumElts, S32), WideRegs).getReg(0);
}

Register AMDGPULegalizerInfo::fixStoreSourceType(
  MachineIRBuilder &B, Register VData, bool IsFormat) const {
  MachineRegisterInfo *MRI = B.getMRI();
  LLT Ty = MRI->getType(VData);

  const LLT S16 = LLT::scalar(16);

  // Fixup illegal register types for i8 stores.
  if (Ty == LLT::scalar(8) || Ty == S16) {
    Register AnyExt = B.buildAnyExt(LLT::scalar(32), VData).getReg(0);
    return AnyExt;
  }

  if (Ty.isVector()) {
    if (Ty.getElementType() == S16 && Ty.getNumElements() <= 4) {
      if (IsFormat)
        return handleD16VData(B, *MRI, VData);
    }
  }

  return VData;
}

bool AMDGPULegalizerInfo::legalizeBufferStore(MachineInstr &MI,
                                              MachineRegisterInfo &MRI,
                                              MachineIRBuilder &B,
                                              bool IsTyped,
                                              bool IsFormat) const {
  B.setInstr(MI);

  Register VData = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(VData);
  LLT EltTy = Ty.getScalarType();
  const bool IsD16 = IsFormat && (EltTy.getSizeInBits() == 16);
  const LLT S32 = LLT::scalar(32);

  VData = fixStoreSourceType(B, VData, IsFormat);
  Register RSrc = MI.getOperand(2).getReg();

  MachineMemOperand *MMO = *MI.memoperands_begin();
  const int MemSize = MMO->getSize();

  unsigned ImmOffset;
  unsigned TotalOffset;

  // The typed intrinsics add an immediate after the registers.
  const unsigned NumVIndexOps = IsTyped ? 8 : 7;

  // The struct intrinsic variants add one additional operand over raw.
  const bool HasVIndex = MI.getNumOperands() == NumVIndexOps;
  Register VIndex;
  int OpOffset = 0;
  if (HasVIndex) {
    VIndex = MI.getOperand(3).getReg();
    OpOffset = 1;
  }

  Register VOffset = MI.getOperand(3 + OpOffset).getReg();
  Register SOffset = MI.getOperand(4 + OpOffset).getReg();

  unsigned Format = 0;
  if (IsTyped) {
    Format = MI.getOperand(5 + OpOffset).getImm();
    ++OpOffset;
  }

  unsigned AuxiliaryData = MI.getOperand(5 + OpOffset).getImm();

  std::tie(VOffset, ImmOffset, TotalOffset) = splitBufferOffsets(B, VOffset);
  if (TotalOffset != 0)
    MMO = B.getMF().getMachineMemOperand(MMO, TotalOffset, MemSize);

  unsigned Opc;
  if (IsTyped) {
    Opc = IsD16 ? AMDGPU::G_AMDGPU_TBUFFER_STORE_FORMAT_D16 :
                  AMDGPU::G_AMDGPU_TBUFFER_STORE_FORMAT;
  } else if (IsFormat) {
    Opc = IsD16 ? AMDGPU::G_AMDGPU_BUFFER_STORE_FORMAT_D16 :
                  AMDGPU::G_AMDGPU_BUFFER_STORE_FORMAT;
  } else {
    switch (MemSize) {
    case 1:
      Opc = AMDGPU::G_AMDGPU_BUFFER_STORE_BYTE;
      break;
    case 2:
      Opc = AMDGPU::G_AMDGPU_BUFFER_STORE_SHORT;
      break;
    default:
      Opc = AMDGPU::G_AMDGPU_BUFFER_STORE;
      break;
    }
  }

  if (!VIndex)
    VIndex = B.buildConstant(S32, 0).getReg(0);

  auto MIB = B.buildInstr(Opc)
    .addUse(VData)      // vdata
    .addUse(RSrc)       // rsrc
    .addUse(VIndex)     // vindex
    .addUse(VOffset)    // voffset
    .addUse(SOffset)    // soffset
    .addImm(ImmOffset); // offset(imm)

  if (IsTyped)
    MIB.addImm(Format);

  MIB.addImm(AuxiliaryData)      // cachepolicy, swizzled buffer(imm)
     .addImm(HasVIndex ? -1 : 0) // idxen(imm)
     .addMemOperand(MMO);

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeBufferLoad(MachineInstr &MI,
                                             MachineRegisterInfo &MRI,
                                             MachineIRBuilder &B,
                                             bool IsFormat,
                                             bool IsTyped) const {
  B.setInstr(MI);

  // FIXME: Verifier should enforce 1 MMO for these intrinsics.
  MachineMemOperand *MMO = *MI.memoperands_begin();
  const int MemSize = MMO->getSize();
  const LLT S32 = LLT::scalar(32);

  Register Dst = MI.getOperand(0).getReg();
  Register RSrc = MI.getOperand(2).getReg();

  // The typed intrinsics add an immediate after the registers.
  const unsigned NumVIndexOps = IsTyped ? 8 : 7;

  // The struct intrinsic variants add one additional operand over raw.
  const bool HasVIndex = MI.getNumOperands() == NumVIndexOps;
  Register VIndex;
  int OpOffset = 0;
  if (HasVIndex) {
    VIndex = MI.getOperand(3).getReg();
    OpOffset = 1;
  }

  Register VOffset = MI.getOperand(3 + OpOffset).getReg();
  Register SOffset = MI.getOperand(4 + OpOffset).getReg();

  unsigned Format = 0;
  if (IsTyped) {
    Format = MI.getOperand(5 + OpOffset).getImm();
    ++OpOffset;
  }

  unsigned AuxiliaryData = MI.getOperand(5 + OpOffset).getImm();
  unsigned ImmOffset;
  unsigned TotalOffset;

  LLT Ty = MRI.getType(Dst);
  LLT EltTy = Ty.getScalarType();
  const bool IsD16 = IsFormat && (EltTy.getSizeInBits() == 16);
  const bool Unpacked = ST.hasUnpackedD16VMem();

  std::tie(VOffset, ImmOffset, TotalOffset) = splitBufferOffsets(B, VOffset);
  if (TotalOffset != 0)
    MMO = B.getMF().getMachineMemOperand(MMO, TotalOffset, MemSize);

  unsigned Opc;

  if (IsTyped) {
    Opc = IsD16 ? AMDGPU::G_AMDGPU_TBUFFER_LOAD_FORMAT_D16 :
                  AMDGPU::G_AMDGPU_TBUFFER_LOAD_FORMAT;
  } else if (IsFormat) {
    Opc = IsD16 ? AMDGPU::G_AMDGPU_BUFFER_LOAD_FORMAT_D16 :
                  AMDGPU::G_AMDGPU_BUFFER_LOAD_FORMAT;
  } else {
    switch (MemSize) {
    case 1:
      Opc = AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE;
      break;
    case 2:
      Opc = AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT;
      break;
    default:
      Opc = AMDGPU::G_AMDGPU_BUFFER_LOAD;
      break;
    }
  }

  Register LoadDstReg;

  bool IsExtLoad = (!IsD16 && MemSize < 4) || (IsD16 && !Ty.isVector());
  LLT UnpackedTy = Ty.changeElementSize(32);

  if (IsExtLoad)
    LoadDstReg = B.getMRI()->createGenericVirtualRegister(S32);
  else if (Unpacked && IsD16 && Ty.isVector())
    LoadDstReg = B.getMRI()->createGenericVirtualRegister(UnpackedTy);
  else
    LoadDstReg = Dst;

  if (!VIndex)
    VIndex = B.buildConstant(S32, 0).getReg(0);

  auto MIB = B.buildInstr(Opc)
    .addDef(LoadDstReg) // vdata
    .addUse(RSrc)       // rsrc
    .addUse(VIndex)     // vindex
    .addUse(VOffset)    // voffset
    .addUse(SOffset)    // soffset
    .addImm(ImmOffset); // offset(imm)

  if (IsTyped)
    MIB.addImm(Format);

  MIB.addImm(AuxiliaryData)      // cachepolicy, swizzled buffer(imm)
     .addImm(HasVIndex ? -1 : 0) // idxen(imm)
     .addMemOperand(MMO);

  if (LoadDstReg != Dst) {
    B.setInsertPt(B.getMBB(), ++B.getInsertPt());

    // The result register was widened for an extending load; narrow it back.
    if (IsExtLoad)
      B.buildTrunc(Dst, LoadDstReg);
    else {
      // Repack to original 16-bit vector result
      // FIXME: G_TRUNC should work, but legalization currently fails
      auto Unmerge = B.buildUnmerge(S32, LoadDstReg);
      SmallVector<Register, 4> Repack;
      for (unsigned I = 0, N = Unmerge->getNumOperands() - 1; I != N; ++I)
        Repack.push_back(B.buildTrunc(EltTy, Unmerge.getReg(I)).getReg(0));
      B.buildMerge(Dst, Repack);
    }
  }

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeAtomicIncDec(MachineInstr &MI,
                                               MachineIRBuilder &B,
                                               bool IsInc) const {
  B.setInstr(MI);
  unsigned Opc = IsInc ? AMDGPU::G_AMDGPU_ATOMIC_INC :
                         AMDGPU::G_AMDGPU_ATOMIC_DEC;
  B.buildInstr(Opc)
    .addDef(MI.getOperand(0).getReg())
    .addUse(MI.getOperand(2).getReg())
    .addUse(MI.getOperand(3).getReg())
    .cloneMemRefs(MI);
  MI.eraseFromParent();
  return true;
}

static unsigned getBufferAtomicPseudo(Intrinsic::ID IntrID) {
  switch (IntrID) {
  case Intrinsic::amdgcn_raw_buffer_atomic_swap:
  case Intrinsic::amdgcn_struct_buffer_atomic_swap:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SWAP;
  case Intrinsic::amdgcn_raw_buffer_atomic_add:
  case Intrinsic::amdgcn_struct_buffer_atomic_add:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_ADD;
  case Intrinsic::amdgcn_raw_buffer_atomic_sub:
  case Intrinsic::amdgcn_struct_buffer_atomic_sub:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SUB;
  case Intrinsic::amdgcn_raw_buffer_atomic_smin:
  case Intrinsic::amdgcn_struct_buffer_atomic_smin:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMIN;
  case Intrinsic::amdgcn_raw_buffer_atomic_umin:
  case Intrinsic::amdgcn_struct_buffer_atomic_umin:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMIN;
  case Intrinsic::amdgcn_raw_buffer_atomic_smax:
  case Intrinsic::amdgcn_struct_buffer_atomic_smax:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_SMAX;
  case Intrinsic::amdgcn_raw_buffer_atomic_umax:
  case Intrinsic::amdgcn_struct_buffer_atomic_umax:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_UMAX;
  case Intrinsic::amdgcn_raw_buffer_atomic_and:
  case Intrinsic::amdgcn_struct_buffer_atomic_and:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_AND;
  case Intrinsic::amdgcn_raw_buffer_atomic_or:
  case Intrinsic::amdgcn_struct_buffer_atomic_or:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_OR;
  case Intrinsic::amdgcn_raw_buffer_atomic_xor:
  case Intrinsic::amdgcn_struct_buffer_atomic_xor:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_XOR;
  case Intrinsic::amdgcn_raw_buffer_atomic_inc:
  case Intrinsic::amdgcn_struct_buffer_atomic_inc:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_INC;
  case Intrinsic::amdgcn_raw_buffer_atomic_dec:
  case Intrinsic::amdgcn_struct_buffer_atomic_dec:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_DEC;
  case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap:
  case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap:
    return AMDGPU::G_AMDGPU_BUFFER_ATOMIC_CMPSWAP;
  default:
    llvm_unreachable("unhandled atomic opcode");
  }
}

bool AMDGPULegalizerInfo::legalizeBufferAtomic(MachineInstr &MI,
                                               MachineIRBuilder &B,
                                               Intrinsic::ID IID) const {
  B.setInstr(MI);

  const bool IsCmpSwap = IID == Intrinsic::amdgcn_raw_buffer_atomic_cmpswap ||
                         IID == Intrinsic::amdgcn_struct_buffer_atomic_cmpswap;

  Register Dst = MI.getOperand(0).getReg();
  Register VData = MI.getOperand(2).getReg();

  Register CmpVal;
  int OpOffset = 0;

  if (IsCmpSwap) {
    CmpVal = MI.getOperand(3 + OpOffset).getReg();
    ++OpOffset;
  }

  Register RSrc = MI.getOperand(3 + OpOffset).getReg();
  const unsigned NumVIndexOps = IsCmpSwap ? 9 : 8;

  // The struct intrinsic variants add one additional operand over raw.
  const bool HasVIndex = MI.getNumOperands() == NumVIndexOps;
  Register VIndex;
  if (HasVIndex) {
    VIndex = MI.getOperand(4 + OpOffset).getReg();
    ++OpOffset;
  }

  Register VOffset = MI.getOperand(4 + OpOffset).getReg();
  Register SOffset = MI.getOperand(5 + OpOffset).getReg();
  unsigned AuxiliaryData = MI.getOperand(6 + OpOffset).getImm();

  MachineMemOperand *MMO = *MI.memoperands_begin();

  unsigned ImmOffset;
  unsigned TotalOffset;
  std::tie(VOffset, ImmOffset, TotalOffset) = splitBufferOffsets(B, VOffset);
  if (TotalOffset != 0)
    MMO = B.getMF().getMachineMemOperand(MMO, TotalOffset, MMO->getSize());

  if (!VIndex)
    VIndex = B.buildConstant(LLT::scalar(32), 0).getReg(0);

  auto MIB = B.buildInstr(getBufferAtomicPseudo(IID))
    .addDef(Dst)
    .addUse(VData); // vdata

  if (IsCmpSwap)
    MIB.addReg(CmpVal);

  MIB.addUse(RSrc)               // rsrc
     .addUse(VIndex)             // vindex
     .addUse(VOffset)            // voffset
     .addUse(SOffset)            // soffset
     .addImm(ImmOffset)          // offset(imm)
     .addImm(AuxiliaryData)      // cachepolicy, swizzled buffer(imm)
     .addImm(HasVIndex ? -1 : 0) // idxen(imm)
     .addMemOperand(MMO);

  MI.eraseFromParent();
  return true;
}

// Produce a vector of s16 elements from s32 pieces.
static void truncToS16Vector(MachineIRBuilder &B, Register DstReg,
                             ArrayRef<Register> UnmergeParts) {
  const LLT S16 = LLT::scalar(16);

  SmallVector<Register, 4> RemergeParts(UnmergeParts.size());
  for (int I = 0, E = UnmergeParts.size(); I != E; ++I)
    RemergeParts[I] = B.buildTrunc(S16, UnmergeParts[I]).getReg(0);

  B.buildBuildVector(DstReg, RemergeParts);
}

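// For packed d16 results the s32 pieces already hold pairs of 16-bit values,
// so they are bitcast to v2s16 and concatenated instead of truncated. The odd
// v3s16 case is handled by padding with an undef v2s16, concatenating to
// v6s16, and unmerging so that only the leading v3s16 half is used (summary
// of the helper that follows, for reference only).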
/// Convert a set of s32 registers to a result vector with s16 elements.
static void bitcastToS16Vector(MachineIRBuilder &B, Register DstReg,
                               ArrayRef<Register> UnmergeParts) {
  MachineRegisterInfo &MRI = *B.getMRI();
  const LLT V2S16 = LLT::vector(2, 16);
  LLT TargetTy = MRI.getType(DstReg);
  int NumElts = UnmergeParts.size();

  if (NumElts == 1) {
    assert(TargetTy == V2S16);
    B.buildBitcast(DstReg, UnmergeParts[0]);
    return;
  }

  SmallVector<Register, 4> RemergeParts(NumElts);
  for (int I = 0; I != NumElts; ++I)
    RemergeParts[I] = B.buildBitcast(V2S16, UnmergeParts[I]).getReg(0);

  if (TargetTy.getSizeInBits() == 32u * NumElts) {
    B.buildConcatVectors(DstReg, RemergeParts);
    return;
  }

  const LLT V3S16 = LLT::vector(3, 16);
  const LLT V6S16 = LLT::vector(6, 16);

  // Widen to v6s16 and unpack v3 parts.
  assert(TargetTy == V3S16);

  RemergeParts.push_back(B.buildUndef(V2S16).getReg(0));
  auto Concat = B.buildConcatVectors(V6S16, RemergeParts);
  B.buildUnmerge({DstReg, MRI.createGenericVirtualRegister(V3S16)}, Concat);
}

// FIXME: Just a vector trunc should be sufficient, but legalization is
// currently broken.
static void repackUnpackedD16Load(MachineIRBuilder &B, Register DstReg,
                                  Register WideDstReg) {
  const LLT S32 = LLT::scalar(32);
  const LLT S16 = LLT::scalar(16);

  auto Unmerge = B.buildUnmerge(S32, WideDstReg);

  int NumOps = Unmerge->getNumOperands() - 1;
  SmallVector<Register, 4> RemergeParts(NumOps);
  for (int I = 0; I != NumOps; ++I)
    RemergeParts[I] = B.buildTrunc(S16, Unmerge.getReg(I)).getReg(0);

  B.buildBuildVector(DstReg, RemergeParts);
}

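// Example of the TFE rewrite performed below (illustrative only): a TFE image
// load declared in the IR as returning {<2 x s32>, s32} is rewritten to define
// a single <3 x s32> register; that register is then unmerged so the first two
// elements rebuild the data result and the final element becomes the status
// (TFE) dword.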
bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
    MachineInstr &MI, MachineIRBuilder &B,
    GISelChangeObserver &Observer,
    const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr) const {
  bool IsTFE = MI.getNumExplicitDefs() == 2;

  // We are only processing the operands of d16 image operations on subtargets
  // that use the unpacked register layout, or need to repack the TFE result.

  // TODO: Need to handle a16 images too
  // TODO: Do we need to guard against already legalized intrinsics?
  if (!IsTFE && !ST.hasUnpackedD16VMem())
    return true;

  const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
      AMDGPU::getMIMGBaseOpcodeInfo(ImageDimIntr->BaseOpcode);

  if (BaseOpcode->Atomic) // No d16 atomics, or TFE.
    return true;

  B.setInstr(MI);

  MachineRegisterInfo *MRI = B.getMRI();
  const LLT S32 = LLT::scalar(32);
  const LLT S16 = LLT::scalar(16);

  if (BaseOpcode->Store) { // No TFE for stores?
    Register VData = MI.getOperand(1).getReg();
    LLT Ty = MRI->getType(VData);
    if (!Ty.isVector() || Ty.getElementType() != S16)
      return true;

    B.setInstr(MI);

    Observer.changingInstr(MI);
    MI.getOperand(1).setReg(handleD16VData(B, *MRI, VData));
    Observer.changedInstr(MI);
    return true;
  }

  Register DstReg = MI.getOperand(0).getReg();
  LLT Ty = MRI->getType(DstReg);
  const LLT EltTy = Ty.getScalarType();
  const bool IsD16 = Ty.getScalarType() == S16;
  const unsigned NumElts = Ty.isVector() ? Ty.getNumElements() : 1;

  if (IsTFE) {
    // In the IR, TFE is supposed to be used with a 2 element struct return
    // type. The instruction really returns these two values in one contiguous
    // register, with one additional dword beyond the loaded data. Rewrite the
    // return type to use a single register result.
    Register Dst1Reg = MI.getOperand(1).getReg();
    if (MRI->getType(Dst1Reg) != S32)
      return false;

    // TODO: Make sure the TFE operand bit is set.

    // The raw dword aligned data component of the load. The only legal cases
    // where this matters should be when using the packed D16 format, for
    // s16 -> <2 x s16> and <3 x s16> -> <4 x s16>.
    LLT RoundedTy;
    LLT TFETy;

    if (IsD16 && ST.hasUnpackedD16VMem()) {
      RoundedTy = LLT::scalarOrVector(NumElts, 32);
      TFETy = LLT::vector(NumElts + 1, 32);
    } else {
      unsigned EltSize = Ty.getScalarSizeInBits();
      unsigned RoundedElts = (Ty.getSizeInBits() + 31) / 32;
      unsigned RoundedSize = 32 * RoundedElts;
      RoundedTy = LLT::scalarOrVector(RoundedSize / EltSize, EltSize);
      TFETy = LLT::vector(RoundedSize / 32 + 1, S32);
    }

    Register TFEReg = MRI->createGenericVirtualRegister(TFETy);
    Observer.changingInstr(MI);

    MI.getOperand(0).setReg(TFEReg);
    MI.RemoveOperand(1);

    Observer.changedInstr(MI);

    // Insert after the instruction.
    B.setInsertPt(*MI.getParent(), ++MI.getIterator());

    // Now figure out how to copy the new result register back into the old
    // result.

    SmallVector<Register, 5> UnmergeResults(TFETy.getNumElements(), Dst1Reg);
    int NumDataElts = TFETy.getNumElements() - 1;

    if (!Ty.isVector()) {
      // Simplest case is a trivial unmerge (plus a truncate for d16).
      UnmergeResults[0] = Ty == S32 ?
        DstReg : MRI->createGenericVirtualRegister(S32);

      B.buildUnmerge(UnmergeResults, TFEReg);
      if (Ty != S32)
        B.buildTrunc(DstReg, UnmergeResults[0]);
      return true;
    }

    // We have to repack into a new vector of some kind.
    for (int I = 0; I != NumDataElts; ++I)
      UnmergeResults[I] = MRI->createGenericVirtualRegister(S32);
    B.buildUnmerge(UnmergeResults, TFEReg);

    // Drop the final TFE element.
    ArrayRef<Register> DataPart(UnmergeResults.data(), NumDataElts);

    if (EltTy == S32)
      B.buildBuildVector(DstReg, DataPart);
    else if (ST.hasUnpackedD16VMem())
      truncToS16Vector(B, DstReg, DataPart);
    else
      bitcastToS16Vector(B, DstReg, DataPart);

    return true;
  }

  // Must be an image load.
  if (!Ty.isVector() || Ty.getElementType() != S16)
    return true;

  B.setInsertPt(*MI.getParent(), ++MI.getIterator());

  LLT WidenedTy = Ty.changeElementType(S32);
  Register WideDstReg = MRI->createGenericVirtualRegister(WidenedTy);

  Observer.changingInstr(MI);
  MI.getOperand(0).setReg(WideDstReg);
  Observer.changedInstr(MI);

  repackUnpackedD16Load(B, DstReg, WideDstReg);
  return true;
}

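// Example of the widening performed below (illustrative only): a 96-bit
// s_buffer_load result such as <3 x s32> is not a power-of-2 size, so its
// result is widened to <4 x s32>; a scalar s96 result would likewise be
// widened to s128.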
bool AMDGPULegalizerInfo::legalizeSBufferLoad(
    MachineInstr &MI, MachineIRBuilder &B,
    GISelChangeObserver &Observer) const {
  Register Dst = MI.getOperand(0).getReg();
  LLT Ty = B.getMRI()->getType(Dst);
  unsigned Size = Ty.getSizeInBits();
  MachineFunction &MF = B.getMF();

  Observer.changingInstr(MI);

  // FIXME: We don't really need this intermediate instruction. The intrinsic
  // should be fixed to have a memory operand. Since it's readnone, we're not
  // allowed to add one.
  MI.setDesc(B.getTII().get(AMDGPU::G_AMDGPU_S_BUFFER_LOAD));
  MI.RemoveOperand(1); // Remove intrinsic ID

  // FIXME: When intrinsic definition is fixed, this should have an MMO already.
  // TODO: Should this use datalayout alignment?
  const unsigned MemSize = (Size + 7) / 8;
  const unsigned MemAlign = 4;
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
          MachineMemOperand::MOInvariant, MemSize, MemAlign);
  MI.addMemOperand(MF, MMO);

  // There are no 96-bit result scalar loads, but widening to 128-bit should
  // always be legal. We may need to restore this to a 96-bit result if it turns
  // out this needs to be converted to a vector load during RegBankSelect.
  if (!isPowerOf2_32(Size)) {
    LegalizerHelper Helper(MF, *this, Observer, B);
    B.setInstr(MI);

    if (Ty.isVector())
      Helper.moreElementsVectorDst(MI, getPow2VectorType(Ty), 0);
    else
      Helper.widenScalarDst(MI, getPow2ScalarType(Ty), 0);
  }

  Observer.changedInstr(MI);
  return true;
}

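// Rough shape of the HSA trap lowering done below (illustrative only):
//   $sgpr0_sgpr1 = COPY %queue_ptr
//   S_TRAP TrapIDLLVMTrap, implicit $sgpr0_sgpr1
// On non-HSA paths, or when the trap handler is disabled, the trap simply
// becomes an S_ENDPGM.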
bool AMDGPULegalizerInfo::legalizeTrapIntrinsic(MachineInstr &MI,
                                                MachineRegisterInfo &MRI,
                                                MachineIRBuilder &B) const {
  B.setInstr(MI);

  // If this is a non-HSA path or the trap handler is disabled, insert an
  // s_endpgm instruction.
  if (ST.getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
      !ST.isTrapHandlerEnabled()) {
    B.buildInstr(AMDGPU::S_ENDPGM).addImm(0);
  } else {
    // Pass the queue pointer to the trap handler as an input, and insert the
    // trap instruction.
    // Reference: https://llvm.org/docs/AMDGPUUsage.html#trap-handler-abi
    const ArgDescriptor *Arg =
        getArgDescriptor(B, AMDGPUFunctionArgInfo::QUEUE_PTR);
    if (!Arg)
      return false;
    MachineRegisterInfo &MRI = *B.getMRI();
    Register SGPR01(AMDGPU::SGPR0_SGPR1);
    Register LiveIn = getLiveInRegister(
        B, MRI, SGPR01, LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64),
        /*InsertLiveInCopy=*/false);
    if (!loadInputValue(LiveIn, B, Arg))
      return false;
    B.buildCopy(SGPR01, LiveIn);
    B.buildInstr(AMDGPU::S_TRAP)
        .addImm(GCNSubtarget::TrapIDLLVMTrap)
        .addReg(SGPR01, RegState::Implicit);
  }

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeDebugTrapIntrinsic(
    MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B) const {
  B.setInstr(MI);

  // If this is a non-HSA path or the trap handler is disabled, report a
  // warning accordingly.
  if (ST.getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbiHsa ||
      !ST.isTrapHandlerEnabled()) {
    DiagnosticInfoUnsupported NoTrap(B.getMF().getFunction(),
                                     "debugtrap handler not supported",
                                     MI.getDebugLoc(), DS_Warning);
    LLVMContext &Ctx = B.getMF().getFunction().getContext();
    Ctx.diagnose(NoTrap);
  } else {
    // Insert the debug-trap instruction.
    B.buildInstr(AMDGPU::S_TRAP).addImm(GCNSubtarget::TrapIDLLVMDebugTrap);
  }

  MI.eraseFromParent();
  return true;
}

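// For the structurizer control-flow intrinsics handled first in the switch
// below, the intrinsic and its consuming G_BRCOND are together replaced by a
// single SI_IF, SI_ELSE, or SI_LOOP pseudo branching to the original target,
// and the mask registers involved are constrained to the wave mask register
// class (summary of the cases below, for reference only).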
bool AMDGPULegalizerInfo::legalizeIntrinsic(MachineInstr &MI,
                                            MachineIRBuilder &B,
                                            GISelChangeObserver &Observer) const {
  MachineRegisterInfo &MRI = *B.getMRI();

  // Replace the use of G_BRCOND with the exec-manipulating branch pseudos.
  auto IntrID = MI.getIntrinsicID();
  switch (IntrID) {
  case Intrinsic::amdgcn_if:
  case Intrinsic::amdgcn_else: {
    MachineInstr *Br = nullptr;
    if (MachineInstr *BrCond = verifyCFIntrinsic(MI, MRI, Br)) {
      const SIRegisterInfo *TRI
        = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());

      B.setInstr(*BrCond);
      Register Def = MI.getOperand(1).getReg();
      Register Use = MI.getOperand(3).getReg();

      MachineBasicBlock *BrTarget = BrCond->getOperand(1).getMBB();
      if (Br)
        BrTarget = Br->getOperand(0).getMBB();

      if (IntrID == Intrinsic::amdgcn_if) {
        B.buildInstr(AMDGPU::SI_IF)
          .addDef(Def)
          .addUse(Use)
          .addMBB(BrTarget);
      } else {
        B.buildInstr(AMDGPU::SI_ELSE)
          .addDef(Def)
          .addUse(Use)
          .addMBB(BrTarget)
          .addImm(0);
      }

      if (Br)
        Br->getOperand(0).setMBB(BrCond->getOperand(1).getMBB());

      MRI.setRegClass(Def, TRI->getWaveMaskRegClass());
      MRI.setRegClass(Use, TRI->getWaveMaskRegClass());
      MI.eraseFromParent();
      BrCond->eraseFromParent();
      return true;
    }

    return false;
  }
  case Intrinsic::amdgcn_loop: {
    MachineInstr *Br = nullptr;
    if (MachineInstr *BrCond = verifyCFIntrinsic(MI, MRI, Br)) {
      const SIRegisterInfo *TRI
        = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());

      B.setInstr(*BrCond);

      MachineBasicBlock *BrTarget = BrCond->getOperand(1).getMBB();
      if (Br)
        BrTarget = Br->getOperand(0).getMBB();

      Register Reg = MI.getOperand(2).getReg();
      B.buildInstr(AMDGPU::SI_LOOP)
        .addUse(Reg)
        .addMBB(BrTarget);

      if (Br)
        Br->getOperand(0).setMBB(BrCond->getOperand(1).getMBB());

      MI.eraseFromParent();
      BrCond->eraseFromParent();
      MRI.setRegClass(Reg, TRI->getWaveMaskRegClass());
      return true;
    }

    return false;
  }
  case Intrinsic::amdgcn_kernarg_segment_ptr:
    return legalizePreloadedArgIntrin(
      MI, MRI, B, AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  case Intrinsic::amdgcn_implicitarg_ptr:
    return legalizeImplicitArgPtr(MI, MRI, B);
  case Intrinsic::amdgcn_workitem_id_x:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKITEM_ID_X);
  case Intrinsic::amdgcn_workitem_id_y:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
  case Intrinsic::amdgcn_workitem_id_z:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
  case Intrinsic::amdgcn_workgroup_id_x:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
  case Intrinsic::amdgcn_workgroup_id_y:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
  case Intrinsic::amdgcn_workgroup_id_z:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
  case Intrinsic::amdgcn_dispatch_ptr:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::DISPATCH_PTR);
  case Intrinsic::amdgcn_queue_ptr:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::QUEUE_PTR);
  case Intrinsic::amdgcn_implicit_buffer_ptr:
    return legalizePreloadedArgIntrin(
      MI, MRI, B, AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
  case Intrinsic::amdgcn_dispatch_id:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::DISPATCH_ID);
  case Intrinsic::amdgcn_fdiv_fast:
    return legalizeFDIVFastIntrin(MI, MRI, B);
  case Intrinsic::amdgcn_is_shared:
    return legalizeIsAddrSpace(MI, MRI, B, AMDGPUAS::LOCAL_ADDRESS);
  case Intrinsic::amdgcn_is_private:
    return legalizeIsAddrSpace(MI, MRI, B, AMDGPUAS::PRIVATE_ADDRESS);
  case Intrinsic::amdgcn_wavefrontsize: {
    B.setInstr(MI);
    B.buildConstant(MI.getOperand(0), ST.getWavefrontSize());
    MI.eraseFromParent();
    return true;
  }
  case Intrinsic::amdgcn_s_buffer_load:
    return legalizeSBufferLoad(MI, B, Observer);
  case Intrinsic::amdgcn_raw_buffer_store:
  case Intrinsic::amdgcn_struct_buffer_store:
    return legalizeBufferStore(MI, MRI, B, false, false);
  case Intrinsic::amdgcn_raw_buffer_store_format:
  case Intrinsic::amdgcn_struct_buffer_store_format:
    return legalizeBufferStore(MI, MRI, B, false, true);
  case Intrinsic::amdgcn_raw_tbuffer_store:
  case Intrinsic::amdgcn_struct_tbuffer_store:
    return legalizeBufferStore(MI, MRI, B, true, true);
  case Intrinsic::amdgcn_raw_buffer_load:
  case Intrinsic::amdgcn_struct_buffer_load:
    return legalizeBufferLoad(MI, MRI, B, false, false);
  case Intrinsic::amdgcn_raw_buffer_load_format:
  case Intrinsic::amdgcn_struct_buffer_load_format:
    return legalizeBufferLoad(MI, MRI, B, true, false);
  case Intrinsic::amdgcn_raw_tbuffer_load:
  case Intrinsic::amdgcn_struct_tbuffer_load:
    return legalizeBufferLoad(MI, MRI, B, true, true);
  case Intrinsic::amdgcn_raw_buffer_atomic_swap:
  case Intrinsic::amdgcn_struct_buffer_atomic_swap:
  case Intrinsic::amdgcn_raw_buffer_atomic_add:
  case Intrinsic::amdgcn_struct_buffer_atomic_add:
  case Intrinsic::amdgcn_raw_buffer_atomic_sub:
  case Intrinsic::amdgcn_struct_buffer_atomic_sub:
  case Intrinsic::amdgcn_raw_buffer_atomic_smin:
  case Intrinsic::amdgcn_struct_buffer_atomic_smin:
  case Intrinsic::amdgcn_raw_buffer_atomic_umin:
  case Intrinsic::amdgcn_struct_buffer_atomic_umin:
  case Intrinsic::amdgcn_raw_buffer_atomic_smax:
  case Intrinsic::amdgcn_struct_buffer_atomic_smax:
  case Intrinsic::amdgcn_raw_buffer_atomic_umax:
  case Intrinsic::amdgcn_struct_buffer_atomic_umax:
  case Intrinsic::amdgcn_raw_buffer_atomic_and:
  case Intrinsic::amdgcn_struct_buffer_atomic_and:
  case Intrinsic::amdgcn_raw_buffer_atomic_or:
  case Intrinsic::amdgcn_struct_buffer_atomic_or:
  case Intrinsic::amdgcn_raw_buffer_atomic_xor:
  case Intrinsic::amdgcn_struct_buffer_atomic_xor:
  case Intrinsic::amdgcn_raw_buffer_atomic_inc:
  case Intrinsic::amdgcn_struct_buffer_atomic_inc:
  case Intrinsic::amdgcn_raw_buffer_atomic_dec:
  case Intrinsic::amdgcn_struct_buffer_atomic_dec:
  case Intrinsic::amdgcn_raw_buffer_atomic_cmpswap:
  case Intrinsic::amdgcn_struct_buffer_atomic_cmpswap:
    return legalizeBufferAtomic(MI, B, IntrID);
  case Intrinsic::amdgcn_atomic_inc:
    return legalizeAtomicIncDec(MI, B, true);
  case Intrinsic::amdgcn_atomic_dec:
    return legalizeAtomicIncDec(MI, B, false);
  case Intrinsic::trap:
    return legalizeTrapIntrinsic(MI, MRI, B);
  case Intrinsic::debugtrap:
    return legalizeDebugTrapIntrinsic(MI, MRI, B);
  default: {
    if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
            AMDGPU::getImageDimIntrinsicInfo(IntrID))
      return legalizeImageIntrinsic(MI, B, Observer, ImageDimIntr);
    return true;
  }
  }

  return true;
}