[GlobalISel] NFC: Change LLT::vector to take ElementCount.

This also adds new interfaces for the fixed and scalable cases (a brief usage sketch follows the list):
* LLT::fixed_vector
* LLT::scalable_vector
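
A minimal sketch of the new construction paths (the element counts and sizes
are arbitrary examples; the include path assumes LLT is still declared in
llvm/Support/LowLevelTypeImpl.h):

#include "llvm/Support/LowLevelTypeImpl.h"
using namespace llvm;

static void buildExampleTypes() {
  // <4 x s32>: fixed-width vector of four 32-bit elements.
  LLT V4S32 = LLT::fixed_vector(4, 32);
  // <vscale x 2 x s64>: scalable vector with a minimum of two 64-bit elements.
  LLT NxV2S64 = LLT::scalable_vector(2, 64);
  // The generic LLT::vector now takes an ElementCount directly.
  LLT V2S16 = LLT::vector(ElementCount::getFixed(2), LLT::scalar(16));
  (void)V4S32; (void)NxV2S64; (void)V2S16;
}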

The strategy for migrating to the new interfaces was as follows (each case
is illustrated in the sketch after this list):
* If the new LLT is a (modified) clone of another LLT, taking the
  same number of elements, then use LLT::vector(OtherTy.getElementCount()),
  or, if the number of elements is halved/doubled, use .divideCoefficientBy(2)
  or operator*. This is because there is no reason to specifically restrict
  these types to 'fixed_vector'.
* If the algorithm works on the number of elements (as unsigned), then
  just use fixed_vector. This will need to be fixed up in the future when
  the algorithm is modified to also work for scalable vectors, and will
  then need additional tests to confirm that the behaviour is the same for
  scalable vectors.
* If the test used the `/*Scalable=*/true` flag of LLT::vector, then
  this is replaced by LLT::scalable_vector.
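
A minimal sketch of these migration patterns (OtherTy, NumElts and the element
sizes are hypothetical placeholders, not taken from the patch):

static void migrationPatterns(LLT OtherTy, unsigned NumElts) {
  // OtherTy is assumed to be a vector type here.
  // Pattern 1: clone another LLT's element count, possibly halving/doubling it.
  LLT Same    = LLT::vector(OtherTy.getElementCount(), OtherTy.getElementType());
  LLT Halved  = LLT::vector(OtherTy.getElementCount().divideCoefficientBy(2),
                            OtherTy.getElementType());
  LLT Doubled = LLT::vector(OtherTy.getElementCount() * 2,
                            OtherTy.getElementType());
  // Pattern 2: the algorithm reasons about a plain unsigned element count.
  LLT Fixed = LLT::fixed_vector(NumElts, /*ScalarSizeInBits=*/32);
  // Pattern 3: former LLT::vector(N, ..., /*Scalable=*/true) calls.
  LLT Scalable = LLT::scalable_vector(NumElts, /*ScalarSizeInBits=*/32);
  (void)Same; (void)Halved; (void)Doubled; (void)Fixed; (void)Scalable;
}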

Reviewed By: aemerson

Differential Revision: https://reviews.llvm.org/D104451
Sander de Smalen 2021-06-24 09:58:21 +01:00
parent 9c4c2f2472
commit d5e14ba88c
32 changed files with 417 additions and 380 deletions


@ -1021,7 +1021,7 @@ public:
[=](const LegalityQuery &Query) {
LLT VecTy = Query.Types[TypeIdx];
return std::make_pair(
TypeIdx, LLT::vector(MinElements, VecTy.getElementType()));
TypeIdx, LLT::fixed_vector(MinElements, VecTy.getElementType()));
});
}
/// Limit the number of elements in EltTy vectors to at most MaxElements.


@ -55,30 +55,51 @@ public:
}
/// Get a low-level vector of some number of elements and element width.
/// \p NumElements must be at least 2.
static LLT vector(uint16_t NumElements, unsigned ScalarSizeInBits,
bool Scalable = false) {
assert(((!Scalable && NumElements > 1) || NumElements > 0) &&
"invalid number of vector elements");
static LLT vector(ElementCount EC, unsigned ScalarSizeInBits) {
assert(!EC.isScalar() && "invalid number of vector elements");
assert(ScalarSizeInBits > 0 && "invalid vector element size");
return LLT{/*isPointer=*/false, /*isVector=*/true,
ElementCount::get(NumElements, Scalable), ScalarSizeInBits,
return LLT{/*isPointer=*/false, /*isVector=*/true, EC, ScalarSizeInBits,
/*AddressSpace=*/0};
}
/// Get a low-level vector of some number of elements and element type.
static LLT vector(uint16_t NumElements, LLT ScalarTy, bool Scalable = false) {
assert(((!Scalable && NumElements > 1) || NumElements > 0) &&
"invalid number of vector elements");
static LLT vector(ElementCount EC, LLT ScalarTy) {
assert(!EC.isScalar() && "invalid number of vector elements");
assert(!ScalarTy.isVector() && "invalid vector element type");
return LLT{ScalarTy.isPointer(), /*isVector=*/true,
ElementCount::get(NumElements, Scalable),
return LLT{ScalarTy.isPointer(), /*isVector=*/true, EC,
ScalarTy.getSizeInBits(),
ScalarTy.isPointer() ? ScalarTy.getAddressSpace() : 0};
}
/// Get a low-level fixed-width vector of some number of elements and element
/// width.
static LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits) {
return vector(ElementCount::getFixed(NumElements), ScalarSizeInBits);
}
/// Get a low-level fixed-width vector of some number of elements and element
/// type.
static LLT fixed_vector(unsigned NumElements, LLT ScalarTy) {
return vector(ElementCount::getFixed(NumElements), ScalarTy);
}
/// Get a low-level scalable vector of some number of elements and element
/// width.
static LLT scalable_vector(unsigned MinNumElements,
unsigned ScalarSizeInBits) {
return vector(ElementCount::getScalable(MinNumElements), ScalarSizeInBits);
}
/// Get a low-level scalable vector of some number of elements and element
/// type.
static LLT scalable_vector(unsigned MinNumElements, LLT ScalarTy) {
return vector(ElementCount::getScalable(MinNumElements), ScalarTy);
}
static LLT scalarOrVector(uint16_t NumElements, LLT ScalarTy) {
return NumElements == 1 ? ScalarTy : LLT::vector(NumElements, ScalarTy);
// FIXME: Migrate interface to use ElementCount
return NumElements == 1 ? ScalarTy
: LLT::fixed_vector(NumElements, ScalarTy);
}
static LLT scalarOrVector(uint16_t NumElements, unsigned ScalarSize) {
@ -150,9 +171,7 @@ public:
/// If this type is a vector, return a vector with the same number of elements
/// but the new element type. Otherwise, return the new element type.
LLT changeElementType(LLT NewEltTy) const {
return isVector() ? LLT::vector(getElementCount().getKnownMinValue(),
NewEltTy, isScalable())
: NewEltTy;
return isVector() ? LLT::vector(getElementCount(), NewEltTy) : NewEltTy;
}
/// If this type is a vector, return a vector with the same number of elements
@ -161,8 +180,7 @@ public:
LLT changeElementSize(unsigned NewEltSize) const {
assert(!getScalarType().isPointer() &&
"invalid to directly change element size for pointers");
return isVector() ? LLT::vector(getElementCount().getKnownMinValue(),
NewEltSize, isScalable())
return isVector() ? LLT::vector(getElementCount(), NewEltSize)
: LLT::scalar(NewEltSize);
}


@ -438,7 +438,7 @@ static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
} else {
// Vector was split, and elements promoted to a wider type.
// FIXME: Should handle floating point promotions.
LLT BVType = LLT::vector(LLTy.getNumElements(), PartLLT);
LLT BVType = LLT::fixed_vector(LLTy.getNumElements(), PartLLT);
auto BV = B.buildBuildVector(BVType, Regs);
B.buildTrunc(OrigRegs[0], BV);
}


@ -1479,7 +1479,8 @@ bool IRTranslator::translateGetElementPtr(const User &U,
// are vectors.
if (VectorWidth && !PtrTy.isVector()) {
BaseReg =
MIRBuilder.buildSplatVector(LLT::vector(VectorWidth, PtrTy), BaseReg)
MIRBuilder
.buildSplatVector(LLT::fixed_vector(VectorWidth, PtrTy), BaseReg)
.getReg(0);
PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
PtrTy = getLLTForType(*PtrIRTy, *DL);


@ -342,8 +342,8 @@ LegacyLegalizerInfo::findVectorLegalAction(const InstrAspect &Aspect) const {
LLT IntermediateType;
auto ElementSizeAndAction =
findAction(ElemSizeVec, Aspect.Type.getScalarSizeInBits());
IntermediateType =
LLT::vector(Aspect.Type.getNumElements(), ElementSizeAndAction.first);
IntermediateType = LLT::fixed_vector(Aspect.Type.getNumElements(),
ElementSizeAndAction.first);
if (ElementSizeAndAction.second != Legal)
return {ElementSizeAndAction.second, IntermediateType};
@ -356,8 +356,8 @@ LegacyLegalizerInfo::findVectorLegalAction(const InstrAspect &Aspect) const {
auto NumElementsAndAction =
findAction(NumElementsVec, IntermediateType.getNumElements());
return {NumElementsAndAction.second,
LLT::vector(NumElementsAndAction.first,
IntermediateType.getScalarSizeInBits())};
LLT::fixed_vector(NumElementsAndAction.first,
IntermediateType.getScalarSizeInBits())};
}
unsigned LegacyLegalizerInfo::getOpcodeIdxForOpcode(unsigned Opcode) const {


@ -69,8 +69,8 @@ LegalizeMutation LegalizeMutations::moreElementsToNextPow2(unsigned TypeIdx,
const LLT VecTy = Query.Types[TypeIdx];
unsigned NewNumElements =
std::max(1u << Log2_32_Ceil(VecTy.getNumElements()), Min);
return std::make_pair(TypeIdx,
LLT::vector(NewNumElements, VecTy.getElementType()));
return std::make_pair(
TypeIdx, LLT::fixed_vector(NewNumElements, VecTy.getElementType()));
};
}


@ -798,7 +798,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
if (SizeOp0 % NarrowSize != 0) {
LLT ImplicitTy = NarrowTy;
if (DstTy.isVector())
ImplicitTy = LLT::vector(DstTy.getNumElements(), ImplicitTy);
ImplicitTy = LLT::vector(DstTy.getElementCount(), ImplicitTy);
Register ImplicitReg = MIRBuilder.buildUndef(ImplicitTy).getReg(0);
MIRBuilder.buildAnyExt(DstReg, ImplicitReg);
@ -2286,9 +2286,9 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
LLT VecTy = MRI.getType(VecReg);
Observer.changingInstr(MI);
widenScalarSrc(MI, LLT::vector(VecTy.getNumElements(),
WideTy.getSizeInBits()),
1, TargetOpcode::G_SEXT);
widenScalarSrc(
MI, LLT::vector(VecTy.getElementCount(), WideTy.getSizeInBits()), 1,
TargetOpcode::G_SEXT);
widenScalarDst(MI, WideTy, 0);
Observer.changedInstr(MI);
@ -2309,7 +2309,7 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
Register VecReg = MI.getOperand(1).getReg();
LLT VecTy = MRI.getType(VecReg);
LLT WideVecTy = LLT::vector(VecTy.getNumElements(), WideTy);
LLT WideVecTy = LLT::vector(VecTy.getElementCount(), WideTy);
widenScalarSrc(MI, WideVecTy, 1, TargetOpcode::G_ANYEXT);
widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT);
@ -2469,7 +2469,7 @@ LegalizerHelper::lowerBitcast(MachineInstr &MI) {
// %3:_(<2 x s8>) = G_BITCAST %2
// %4:_(<2 x s8>) = G_BITCAST %3
// %1:_(<4 x s16>) = G_CONCAT_VECTORS %3, %4
DstCastTy = LLT::vector(NumDstElt / NumSrcElt, DstEltTy);
DstCastTy = LLT::fixed_vector(NumDstElt / NumSrcElt, DstEltTy);
SrcPartTy = SrcEltTy;
} else if (NumSrcElt > NumDstElt) { // Source element type is smaller.
//
@ -2481,7 +2481,7 @@ LegalizerHelper::lowerBitcast(MachineInstr &MI) {
// %3:_(s16) = G_BITCAST %2
// %4:_(s16) = G_BITCAST %3
// %1:_(<2 x s16>) = G_BUILD_VECTOR %3, %4
SrcPartTy = LLT::vector(NumSrcElt / NumDstElt, SrcEltTy);
SrcPartTy = LLT::fixed_vector(NumSrcElt / NumDstElt, SrcEltTy);
DstCastTy = DstEltTy;
}
@ -3397,7 +3397,7 @@ LegalizerHelper::fewerElementsVectorCasts(MachineInstr &MI, unsigned TypeIdx,
if (NumParts * NarrowTy.getNumElements() != DstTy.getNumElements())
return UnableToLegalize;
NarrowTy1 = LLT::vector(NarrowTy.getNumElements(), SrcTy.getElementType());
NarrowTy1 = LLT::vector(NarrowTy.getElementCount(), SrcTy.getElementType());
} else {
NumParts = DstTy.getNumElements();
NarrowTy1 = SrcTy.getElementType();
@ -3441,9 +3441,9 @@ LegalizerHelper::fewerElementsVectorCmp(MachineInstr &MI, unsigned TypeIdx,
NarrowTy0 = NarrowTy;
NumParts = NarrowTy.isVector() ? (OldElts / NewElts) : DstTy.getNumElements();
NarrowTy1 = NarrowTy.isVector() ?
LLT::vector(NarrowTy.getNumElements(), SrcTy.getScalarSizeInBits()) :
SrcTy.getElementType();
NarrowTy1 = NarrowTy.isVector() ? LLT::vector(NarrowTy.getElementCount(),
SrcTy.getScalarSizeInBits())
: SrcTy.getElementType();
} else {
unsigned NewElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1;
@ -3451,8 +3451,8 @@ LegalizerHelper::fewerElementsVectorCmp(MachineInstr &MI, unsigned TypeIdx,
NumParts = NarrowTy.isVector() ? (OldElts / NewElts) :
NarrowTy.getNumElements();
NarrowTy0 = LLT::vector(NarrowTy.getNumElements(),
DstTy.getScalarSizeInBits());
NarrowTy0 =
LLT::vector(NarrowTy.getElementCount(), DstTy.getScalarSizeInBits());
NarrowTy1 = NarrowTy;
}
@ -3523,8 +3523,9 @@ LegalizerHelper::fewerElementsVectorSelect(MachineInstr &MI, unsigned TypeIdx,
if (CondTy.getNumElements() == NumParts)
NarrowTy1 = CondTy.getElementType();
else
NarrowTy1 = LLT::vector(CondTy.getNumElements() / NumParts,
CondTy.getScalarSizeInBits());
NarrowTy1 =
LLT::vector(CondTy.getElementCount().divideCoefficientBy(NumParts),
CondTy.getScalarSizeInBits());
}
} else {
NumParts = CondTy.getNumElements();


@ -773,8 +773,9 @@ LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
int GCDElts = greatestCommonDivisor(OrigTy.getNumElements(),
TargetTy.getNumElements());
// Prefer the original element type.
int Mul = OrigTy.getNumElements() * TargetTy.getNumElements();
return LLT::vector(Mul / GCDElts, OrigTy.getElementType());
ElementCount Mul = OrigTy.getElementCount() * TargetTy.getNumElements();
return LLT::vector(Mul.divideCoefficientBy(GCDElts),
OrigTy.getElementType());
}
} else {
if (OrigElt.getSizeInBits() == TargetSize)
@ -782,12 +783,12 @@ LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
}
unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
return LLT::vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
return LLT::fixed_vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
}
if (TargetTy.isVector()) {
unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
return LLT::vector(LCMSize / OrigSize, OrigTy);
return LLT::fixed_vector(LCMSize / OrigSize, OrigTy);
}
unsigned LCMSize = getLCMSize(OrigSize, TargetSize);
@ -831,7 +832,7 @@ LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
// scalar.
if (GCD < OrigElt.getSizeInBits())
return LLT::scalar(GCD);
return LLT::vector(GCD / OrigElt.getSizeInBits(), OrigElt);
return LLT::fixed_vector(GCD / OrigElt.getSizeInBits(), OrigElt);
}
if (TargetTy.isVector()) {


@ -24,7 +24,7 @@ LLT llvm::getLLTForType(Type &Ty, const DataLayout &DL) {
LLT ScalarTy = getLLTForType(*VTy->getElementType(), DL);
if (EC.isScalar())
return ScalarTy;
return LLT::vector(EC.getKnownMinValue(), ScalarTy, EC.isScalable());
return LLT::vector(EC, ScalarTy);
}
if (auto PTy = dyn_cast<PointerType>(&Ty)) {
@ -56,7 +56,7 @@ LLT llvm::getLLTForMVT(MVT Ty) {
if (!Ty.isVector())
return LLT::scalar(Ty.getSizeInBits());
return LLT::vector(Ty.getVectorNumElements(),
return LLT::vector(Ty.getVectorElementCount(),
Ty.getVectorElementType().getSizeInBits());
}


@ -1729,7 +1729,7 @@ bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty) {
return error(Loc, "expected <M x sN> or <M x pA> for vector type");
lex();
Ty = LLT::vector(NumElements, Ty);
Ty = LLT::fixed_vector(NumElements, Ty);
return false;
}


@ -1818,7 +1818,7 @@ bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
// Disregard v2i64. Memcpy lowering produces those and splitting
// them regresses performance on micro-benchmarks and olden/bh.
Ty == LLT::vector(2, 64);
Ty == LLT::fixed_vector(2, 64);
}
return true;
}
@ -11756,7 +11756,7 @@ LLT AArch64TargetLowering::getOptimalMemOpLLT(
if (CanUseNEON && Op.isMemset() && !IsSmallMemset &&
AlignmentIsAcceptable(MVT::v2i64, Align(16)))
return LLT::vector(2, 64);
return LLT::fixed_vector(2, 64);
if (CanUseFP && !IsSmallMemset && AlignmentIsAcceptable(MVT::f128, Align(16)))
return LLT::scalar(128);
if (Op.size() >= 8 && AlignmentIsAcceptable(MVT::i64, Align(8)))


@ -1685,19 +1685,19 @@ bool AArch64InstructionSelector::selectVectorSHL(MachineInstr &I,
Optional<int64_t> ImmVal = getVectorSHLImm(Ty, Src2Reg, MRI);
unsigned Opc = 0;
if (Ty == LLT::vector(2, 64)) {
if (Ty == LLT::fixed_vector(2, 64)) {
Opc = ImmVal ? AArch64::SHLv2i64_shift : AArch64::USHLv2i64;
} else if (Ty == LLT::vector(4, 32)) {
} else if (Ty == LLT::fixed_vector(4, 32)) {
Opc = ImmVal ? AArch64::SHLv4i32_shift : AArch64::USHLv4i32;
} else if (Ty == LLT::vector(2, 32)) {
} else if (Ty == LLT::fixed_vector(2, 32)) {
Opc = ImmVal ? AArch64::SHLv2i32_shift : AArch64::USHLv2i32;
} else if (Ty == LLT::vector(4, 16)) {
} else if (Ty == LLT::fixed_vector(4, 16)) {
Opc = ImmVal ? AArch64::SHLv4i16_shift : AArch64::USHLv4i16;
} else if (Ty == LLT::vector(8, 16)) {
} else if (Ty == LLT::fixed_vector(8, 16)) {
Opc = ImmVal ? AArch64::SHLv8i16_shift : AArch64::USHLv8i16;
} else if (Ty == LLT::vector(16, 8)) {
} else if (Ty == LLT::fixed_vector(16, 8)) {
Opc = ImmVal ? AArch64::SHLv16i8_shift : AArch64::USHLv16i8;
} else if (Ty == LLT::vector(8, 8)) {
} else if (Ty == LLT::fixed_vector(8, 8)) {
Opc = ImmVal ? AArch64::SHLv8i8_shift : AArch64::USHLv8i8;
} else {
LLVM_DEBUG(dbgs() << "Unhandled G_SHL type");
@ -1739,25 +1739,25 @@ bool AArch64InstructionSelector::selectVectorAshrLshr(
unsigned NegOpc = 0;
const TargetRegisterClass *RC =
getRegClassForTypeOnBank(Ty, RBI.getRegBank(AArch64::FPRRegBankID), RBI);
if (Ty == LLT::vector(2, 64)) {
if (Ty == LLT::fixed_vector(2, 64)) {
Opc = IsASHR ? AArch64::SSHLv2i64 : AArch64::USHLv2i64;
NegOpc = AArch64::NEGv2i64;
} else if (Ty == LLT::vector(4, 32)) {
} else if (Ty == LLT::fixed_vector(4, 32)) {
Opc = IsASHR ? AArch64::SSHLv4i32 : AArch64::USHLv4i32;
NegOpc = AArch64::NEGv4i32;
} else if (Ty == LLT::vector(2, 32)) {
} else if (Ty == LLT::fixed_vector(2, 32)) {
Opc = IsASHR ? AArch64::SSHLv2i32 : AArch64::USHLv2i32;
NegOpc = AArch64::NEGv2i32;
} else if (Ty == LLT::vector(4, 16)) {
} else if (Ty == LLT::fixed_vector(4, 16)) {
Opc = IsASHR ? AArch64::SSHLv4i16 : AArch64::USHLv4i16;
NegOpc = AArch64::NEGv4i16;
} else if (Ty == LLT::vector(8, 16)) {
} else if (Ty == LLT::fixed_vector(8, 16)) {
Opc = IsASHR ? AArch64::SSHLv8i16 : AArch64::USHLv8i16;
NegOpc = AArch64::NEGv8i16;
} else if (Ty == LLT::vector(16, 8)) {
} else if (Ty == LLT::fixed_vector(16, 8)) {
Opc = IsASHR ? AArch64::SSHLv16i8 : AArch64::USHLv16i8;
NegOpc = AArch64::NEGv16i8;
} else if (Ty == LLT::vector(8, 8)) {
} else if (Ty == LLT::fixed_vector(8, 8)) {
Opc = IsASHR ? AArch64::SSHLv8i8 : AArch64::USHLv8i8;
NegOpc = AArch64::NEGv8i8;
} else {
@ -1961,7 +1961,8 @@ bool AArch64InstructionSelector::convertPtrAddToAdd(
if (PtrTy.getAddressSpace() != 0)
return false;
const LLT CastPtrTy = PtrTy.isVector() ? LLT::vector(2, 64) : LLT::scalar(64);
const LLT CastPtrTy =
PtrTy.isVector() ? LLT::fixed_vector(2, 64) : LLT::scalar(64);
auto PtrToInt = MIB.buildPtrToInt(CastPtrTy, AddOp1Reg);
// Set regbanks on the registers.
if (PtrTy.isVector())
@ -2918,7 +2919,8 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
I.setDesc(TII.get(TargetOpcode::COPY));
return true;
} else if (DstRB.getID() == AArch64::FPRRegBankID) {
if (DstTy == LLT::vector(4, 16) && SrcTy == LLT::vector(4, 32)) {
if (DstTy == LLT::fixed_vector(4, 16) &&
SrcTy == LLT::fixed_vector(4, 32)) {
I.setDesc(TII.get(AArch64::XTNv4i16));
constrainSelectedInstRegOperands(I, TII, TRI, RBI);
return true;
@ -3239,13 +3241,13 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
AArch64::GPRRegBankID)
return false; // We expect the fpr regbank case to be imported.
LLT VecTy = MRI.getType(I.getOperand(0).getReg());
if (VecTy == LLT::vector(8, 8))
if (VecTy == LLT::fixed_vector(8, 8))
I.setDesc(TII.get(AArch64::DUPv8i8gpr));
else if (VecTy == LLT::vector(16, 8))
else if (VecTy == LLT::fixed_vector(16, 8))
I.setDesc(TII.get(AArch64::DUPv16i8gpr));
else if (VecTy == LLT::vector(4, 16))
else if (VecTy == LLT::fixed_vector(4, 16))
I.setDesc(TII.get(AArch64::DUPv4i16gpr));
else if (VecTy == LLT::vector(8, 16))
else if (VecTy == LLT::fixed_vector(8, 16))
I.setDesc(TII.get(AArch64::DUPv8i16gpr));
else
return false;
@ -3286,7 +3288,7 @@ bool AArch64InstructionSelector::selectReduction(MachineInstr &I,
if (I.getOpcode() == TargetOpcode::G_VECREDUCE_ADD) {
// For <2 x i32> ADDPv2i32 generates an FPR64 value, so we need to emit
// a subregister copy afterwards.
if (VecTy == LLT::vector(2, 32)) {
if (VecTy == LLT::fixed_vector(2, 32)) {
Register DstReg = I.getOperand(0).getReg();
auto AddP = MIB.buildInstr(AArch64::ADDPv2i32, {&AArch64::FPR64RegClass},
{VecReg, VecReg});
@ -3299,13 +3301,13 @@ bool AArch64InstructionSelector::selectReduction(MachineInstr &I,
}
unsigned Opc = 0;
if (VecTy == LLT::vector(16, 8))
if (VecTy == LLT::fixed_vector(16, 8))
Opc = AArch64::ADDVv16i8v;
else if (VecTy == LLT::vector(8, 16))
else if (VecTy == LLT::fixed_vector(8, 16))
Opc = AArch64::ADDVv8i16v;
else if (VecTy == LLT::vector(4, 32))
else if (VecTy == LLT::fixed_vector(4, 32))
Opc = AArch64::ADDVv4i32v;
else if (VecTy == LLT::vector(2, 64))
else if (VecTy == LLT::fixed_vector(2, 64))
Opc = AArch64::ADDPv2i64p;
else {
LLVM_DEBUG(dbgs() << "Unhandled type for add reduction");
@ -3317,9 +3319,9 @@ bool AArch64InstructionSelector::selectReduction(MachineInstr &I,
if (I.getOpcode() == TargetOpcode::G_VECREDUCE_FADD) {
unsigned Opc = 0;
if (VecTy == LLT::vector(2, 32))
if (VecTy == LLT::fixed_vector(2, 32))
Opc = AArch64::FADDPv2i32p;
else if (VecTy == LLT::vector(2, 64))
else if (VecTy == LLT::fixed_vector(2, 64))
Opc = AArch64::FADDPv2i64p;
else {
LLVM_DEBUG(dbgs() << "Unhandled type for fadd reduction");


@ -47,16 +47,16 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
const LLT s128 = LLT::scalar(128);
const LLT s256 = LLT::scalar(256);
const LLT s512 = LLT::scalar(512);
const LLT v16s8 = LLT::vector(16, 8);
const LLT v8s8 = LLT::vector(8, 8);
const LLT v4s8 = LLT::vector(4, 8);
const LLT v8s16 = LLT::vector(8, 16);
const LLT v4s16 = LLT::vector(4, 16);
const LLT v2s16 = LLT::vector(2, 16);
const LLT v2s32 = LLT::vector(2, 32);
const LLT v4s32 = LLT::vector(4, 32);
const LLT v2s64 = LLT::vector(2, 64);
const LLT v2p0 = LLT::vector(2, p0);
const LLT v16s8 = LLT::fixed_vector(16, 8);
const LLT v8s8 = LLT::fixed_vector(8, 8);
const LLT v4s8 = LLT::fixed_vector(4, 8);
const LLT v8s16 = LLT::fixed_vector(8, 16);
const LLT v4s16 = LLT::fixed_vector(4, 16);
const LLT v2s16 = LLT::fixed_vector(2, 16);
const LLT v2s32 = LLT::fixed_vector(2, 32);
const LLT v4s32 = LLT::fixed_vector(4, 32);
const LLT v2s64 = LLT::fixed_vector(2, 64);
const LLT v2p0 = LLT::fixed_vector(2, p0);
std::initializer_list<LLT> PackedVectorAllTypeList = {/* Begin 128bit types */
v16s8, v8s16, v4s32,
@ -92,7 +92,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
[=](const LegalityQuery &Query) {
LLT EltTy = Query.Types[0].getElementType();
if (EltTy == s64)
return std::make_pair(0, LLT::vector(2, 64));
return std::make_pair(0, LLT::fixed_vector(2, 64));
return std::make_pair(0, EltTy);
});
@ -977,7 +977,7 @@ bool AArch64LegalizerInfo::legalizeLoadStore(
}
unsigned PtrSize = ValTy.getElementType().getSizeInBits();
const LLT NewTy = LLT::vector(ValTy.getNumElements(), PtrSize);
const LLT NewTy = LLT::vector(ValTy.getElementCount(), PtrSize);
auto &MMO = **MI.memoperands_begin();
if (MI.getOpcode() == TargetOpcode::G_STORE) {
auto Bitcast = MIRBuilder.buildBitcast(NewTy, ValReg);
@ -1073,7 +1073,7 @@ bool AArch64LegalizerInfo::legalizeCTPOP(MachineInstr &MI,
assert((Size == 32 || Size == 64) && "Expected only 32 or 64 bit scalars!");
if (Size == 32)
Val = MIRBuilder.buildZExt(LLT::scalar(64), Val).getReg(0);
const LLT V8S8 = LLT::vector(8, LLT::scalar(8));
const LLT V8S8 = LLT::fixed_vector(8, LLT::scalar(8));
Val = MIRBuilder.buildBitcast(V8S8, Val).getReg(0);
auto CTPOP = MIRBuilder.buildCTPOP(V8S8, Val);
auto UADDLV =


@ -705,7 +705,7 @@ bool applyDupLane(MachineInstr &MI, MachineRegisterInfo &MRI,
Register DupSrc = MI.getOperand(1).getReg();
// For types like <2 x s32>, we can use G_DUPLANE32, with a <4 x s32> source.
// To do this, we can use a G_CONCAT_VECTORS to do the widening.
if (SrcTy == LLT::vector(2, LLT::scalar(32))) {
if (SrcTy == LLT::fixed_vector(2, LLT::scalar(32))) {
assert(MRI.getType(MI.getOperand(0).getReg()).getNumElements() == 2 &&
"Unexpected dest elements");
auto Undef = B.buildUndef(SrcTy);


@ -92,7 +92,7 @@ AMDGPUFunctionArgInfo::getPreloadedValue(
case AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER: {
return std::make_tuple(PrivateSegmentBuffer ? &PrivateSegmentBuffer
: nullptr,
&AMDGPU::SGPR_128RegClass, LLT::vector(4, 32));
&AMDGPU::SGPR_128RegClass, LLT::fixed_vector(4, 32));
}
case AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR:
return std::make_tuple(ImplicitBufferPtr ? &ImplicitBufferPtr : nullptr,


@ -1076,8 +1076,8 @@ void AMDGPUCallLowering::handleImplicitCallArguments(
if (!ST.enableFlatScratch()) {
// Insert copies for the SRD. In the HSA case, this should be an identity
// copy.
auto ScratchRSrcReg =
MIRBuilder.buildCopy(LLT::vector(4, 32), FuncInfo.getScratchRSrcReg());
auto ScratchRSrcReg = MIRBuilder.buildCopy(LLT::fixed_vector(4, 32),
FuncInfo.getScratchRSrcReg());
MIRBuilder.buildCopy(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
CallInst.addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Implicit);
}


@ -590,7 +590,7 @@ bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
return true;
const LLT S32 = LLT::scalar(32);
const LLT V2S16 = LLT::vector(2, 16);
const LLT V2S16 = LLT::fixed_vector(2, 16);
Register Dst = MI.getOperand(0).getReg();
if (MRI->getType(Dst) != V2S16)
@ -1883,7 +1883,7 @@ bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
return false;
}
if (DstTy == LLT::vector(2, 16) && SrcTy == LLT::vector(2, 32)) {
if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) {
MachineBasicBlock *MBB = I.getParent();
const DebugLoc &DL = I.getDebugLoc();
@ -2813,7 +2813,7 @@ bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
Register Src1Reg = MI.getOperand(2).getReg();
ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
const LLT V2S16 = LLT::vector(2, 16);
const LLT V2S16 = LLT::fixed_vector(2, 16);
if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
return false;
@ -3331,7 +3331,7 @@ AMDGPUInstructionSelector::selectVOP3PModsImpl(
if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
// It's possible to see an f32 fneg here, but unlikely.
// TODO: Treat f32 fneg as only high bit.
MRI.getType(Src) == LLT::vector(2, 16)) {
MRI.getType(Src) == LLT::fixed_vector(2, 16)) {
Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
Src = MI->getOperand(1).getReg();
MI = MRI.getVRegDef(Src);


@ -95,7 +95,8 @@ static LegalizeMutation oneMoreElement(unsigned TypeIdx) {
return [=](const LegalityQuery &Query) {
const LLT Ty = Query.Types[TypeIdx];
const LLT EltTy = Ty.getElementType();
return std::make_pair(TypeIdx, LLT::vector(Ty.getNumElements() + 1, EltTy));
return std::make_pair(TypeIdx,
LLT::fixed_vector(Ty.getNumElements() + 1, EltTy));
};
}
@ -124,7 +125,7 @@ static LegalizeMutation moreEltsToNext32Bit(unsigned TypeIdx) {
assert(EltSize < 32);
const int NewNumElts = (32 * NextMul32 + EltSize - 1) / EltSize;
return std::make_pair(TypeIdx, LLT::vector(NewNumElts, EltTy));
return std::make_pair(TypeIdx, LLT::fixed_vector(NewNumElts, EltTy));
};
}
@ -433,35 +434,35 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
const LLT S512 = LLT::scalar(512);
const LLT MaxScalar = LLT::scalar(MaxRegisterSize);
const LLT V2S8 = LLT::vector(2, 8);
const LLT V2S16 = LLT::vector(2, 16);
const LLT V4S16 = LLT::vector(4, 16);
const LLT V2S8 = LLT::fixed_vector(2, 8);
const LLT V2S16 = LLT::fixed_vector(2, 16);
const LLT V4S16 = LLT::fixed_vector(4, 16);
const LLT V2S32 = LLT::vector(2, 32);
const LLT V3S32 = LLT::vector(3, 32);
const LLT V4S32 = LLT::vector(4, 32);
const LLT V5S32 = LLT::vector(5, 32);
const LLT V6S32 = LLT::vector(6, 32);
const LLT V7S32 = LLT::vector(7, 32);
const LLT V8S32 = LLT::vector(8, 32);
const LLT V9S32 = LLT::vector(9, 32);
const LLT V10S32 = LLT::vector(10, 32);
const LLT V11S32 = LLT::vector(11, 32);
const LLT V12S32 = LLT::vector(12, 32);
const LLT V13S32 = LLT::vector(13, 32);
const LLT V14S32 = LLT::vector(14, 32);
const LLT V15S32 = LLT::vector(15, 32);
const LLT V16S32 = LLT::vector(16, 32);
const LLT V32S32 = LLT::vector(32, 32);
const LLT V2S32 = LLT::fixed_vector(2, 32);
const LLT V3S32 = LLT::fixed_vector(3, 32);
const LLT V4S32 = LLT::fixed_vector(4, 32);
const LLT V5S32 = LLT::fixed_vector(5, 32);
const LLT V6S32 = LLT::fixed_vector(6, 32);
const LLT V7S32 = LLT::fixed_vector(7, 32);
const LLT V8S32 = LLT::fixed_vector(8, 32);
const LLT V9S32 = LLT::fixed_vector(9, 32);
const LLT V10S32 = LLT::fixed_vector(10, 32);
const LLT V11S32 = LLT::fixed_vector(11, 32);
const LLT V12S32 = LLT::fixed_vector(12, 32);
const LLT V13S32 = LLT::fixed_vector(13, 32);
const LLT V14S32 = LLT::fixed_vector(14, 32);
const LLT V15S32 = LLT::fixed_vector(15, 32);
const LLT V16S32 = LLT::fixed_vector(16, 32);
const LLT V32S32 = LLT::fixed_vector(32, 32);
const LLT V2S64 = LLT::vector(2, 64);
const LLT V3S64 = LLT::vector(3, 64);
const LLT V4S64 = LLT::vector(4, 64);
const LLT V5S64 = LLT::vector(5, 64);
const LLT V6S64 = LLT::vector(6, 64);
const LLT V7S64 = LLT::vector(7, 64);
const LLT V8S64 = LLT::vector(8, 64);
const LLT V16S64 = LLT::vector(16, 64);
const LLT V2S64 = LLT::fixed_vector(2, 64);
const LLT V3S64 = LLT::fixed_vector(3, 64);
const LLT V4S64 = LLT::fixed_vector(4, 64);
const LLT V5S64 = LLT::fixed_vector(5, 64);
const LLT V6S64 = LLT::fixed_vector(6, 64);
const LLT V7S64 = LLT::fixed_vector(7, 64);
const LLT V8S64 = LLT::fixed_vector(8, 64);
const LLT V16S64 = LLT::fixed_vector(16, 64);
std::initializer_list<LLT> AllS32Vectors =
{V2S32, V3S32, V4S32, V5S32, V6S32, V7S32, V8S32,
@ -1224,8 +1225,8 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
NumElts % NumPieces != 0)
return std::make_pair(0, EltTy);
return std::make_pair(0,
LLT::vector(NumElts / NumPieces, EltTy));
return std::make_pair(
0, LLT::fixed_vector(NumElts / NumPieces, EltTy));
}
// FIXME: We could probably handle weird extending loads better.
@ -1248,7 +1249,8 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
unsigned Align = Query.MMODescrs[0].AlignInBits;
if (EltSize > Align &&
(EltSize / Align < DstTy.getNumElements())) {
return std::make_pair(0, LLT::vector(EltSize / Align, EltTy));
return std::make_pair(
0, LLT::fixed_vector(EltSize / Align, EltTy));
}
// May need relegalization for the scalars.
@ -1325,19 +1327,21 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
// Condition should be s32 for scalar, s1 for vector.
getActionDefinitionsBuilder(G_SELECT)
.legalForCartesianProduct({S32, S64, S16, V2S32, V2S16, V4S16,
GlobalPtr, LocalPtr, FlatPtr, PrivatePtr,
LLT::vector(2, LocalPtr), LLT::vector(2, PrivatePtr)}, {S1, S32})
.clampScalar(0, S16, S64)
.scalarize(1)
.moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
.fewerElementsIf(numElementsNotEven(0), scalarize(0))
.clampMaxNumElements(0, S32, 2)
.clampMaxNumElements(0, LocalPtr, 2)
.clampMaxNumElements(0, PrivatePtr, 2)
.scalarize(0)
.widenScalarToNextPow2(0)
.legalIf(all(isPointer(0), typeInSet(1, {S1, S32})));
.legalForCartesianProduct({S32, S64, S16, V2S32, V2S16, V4S16, GlobalPtr,
LocalPtr, FlatPtr, PrivatePtr,
LLT::fixed_vector(2, LocalPtr),
LLT::fixed_vector(2, PrivatePtr)},
{S1, S32})
.clampScalar(0, S16, S64)
.scalarize(1)
.moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
.fewerElementsIf(numElementsNotEven(0), scalarize(0))
.clampMaxNumElements(0, S32, 2)
.clampMaxNumElements(0, LocalPtr, 2)
.clampMaxNumElements(0, PrivatePtr, 2)
.scalarize(0)
.widenScalarToNextPow2(0)
.legalIf(all(isPointer(0), typeInSet(1, {S1, S32})));
// TODO: Only the low 4/5/6 bits of the shift amount are observed, so we can
// be more flexible with the shift amount type.
@ -1416,7 +1420,8 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
const unsigned TargetEltSize = DstEltSize % 64 == 0 ? 64 : 32;
return std::make_pair(
VecTypeIdx, LLT::vector(VecSize / TargetEltSize, TargetEltSize));
VecTypeIdx,
LLT::fixed_vector(VecSize / TargetEltSize, TargetEltSize));
})
.clampScalar(EltTypeIdx, S32, S64)
.clampScalar(VecTypeIdx, S32, S64)
@ -2220,7 +2225,7 @@ bool AMDGPULegalizerInfo::legalizeInsertVectorElt(
bool AMDGPULegalizerInfo::legalizeShuffleVector(
MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &B) const {
const LLT V2S16 = LLT::vector(2, 16);
const LLT V2S16 = LLT::fixed_vector(2, 16);
Register Dst = MI.getOperand(0).getReg();
Register Src0 = MI.getOperand(1).getReg();
@ -2555,7 +2560,7 @@ bool AMDGPULegalizerInfo::legalizeAtomicCmpXChg(
"this should not have been custom lowered");
LLT ValTy = MRI.getType(CmpVal);
LLT VecTy = LLT::vector(2, ValTy);
LLT VecTy = LLT::fixed_vector(2, ValTy);
Register PackedVal = B.buildBuildVector(VecTy, { NewVal, CmpVal }).getReg(0);
@ -2707,7 +2712,7 @@ bool AMDGPULegalizerInfo::legalizeBuildVector(
MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B) const {
Register Dst = MI.getOperand(0).getReg();
const LLT S32 = LLT::scalar(32);
assert(MRI.getType(Dst) == LLT::vector(2, 16));
assert(MRI.getType(Dst) == LLT::fixed_vector(2, 16));
Register Src0 = MI.getOperand(1).getReg();
Register Src1 = MI.getOperand(2).getReg();
@ -3691,7 +3696,8 @@ Register AMDGPULegalizerInfo::handleD16VData(MachineIRBuilder &B,
int NumElts = StoreVT.getNumElements();
return B.buildBuildVector(LLT::vector(NumElts, S32), WideRegs).getReg(0);
return B.buildBuildVector(LLT::fixed_vector(NumElts, S32), WideRegs)
.getReg(0);
}
if (ImageStore && ST.hasImageStoreD16Bug()) {
@ -3700,7 +3706,8 @@ Register AMDGPULegalizerInfo::handleD16VData(MachineIRBuilder &B,
Reg = B.buildBitcast(S32, Reg).getReg(0);
PackedRegs.push_back(Reg);
PackedRegs.resize(2, B.buildUndef(S32).getReg(0));
return B.buildBuildVector(LLT::vector(2, S32), PackedRegs).getReg(0);
return B.buildBuildVector(LLT::fixed_vector(2, S32), PackedRegs)
.getReg(0);
}
if (StoreVT.getNumElements() == 3) {
@ -3709,18 +3716,19 @@ Register AMDGPULegalizerInfo::handleD16VData(MachineIRBuilder &B,
for (int I = 0, E = Unmerge->getNumOperands() - 1; I != E; ++I)
PackedRegs.push_back(Unmerge.getReg(I));
PackedRegs.resize(6, B.buildUndef(S16).getReg(0));
Reg = B.buildBuildVector(LLT::vector(6, S16), PackedRegs).getReg(0);
return B.buildBitcast(LLT::vector(3, S32), Reg).getReg(0);
Reg = B.buildBuildVector(LLT::fixed_vector(6, S16), PackedRegs).getReg(0);
return B.buildBitcast(LLT::fixed_vector(3, S32), Reg).getReg(0);
}
if (StoreVT.getNumElements() == 4) {
SmallVector<Register, 4> PackedRegs;
Reg = B.buildBitcast(LLT::vector(2, S32), Reg).getReg(0);
Reg = B.buildBitcast(LLT::fixed_vector(2, S32), Reg).getReg(0);
auto Unmerge = B.buildUnmerge(S32, Reg);
for (int I = 0, E = Unmerge->getNumOperands() - 1; I != E; ++I)
PackedRegs.push_back(Unmerge.getReg(I));
PackedRegs.resize(4, B.buildUndef(S32).getReg(0));
return B.buildBuildVector(LLT::vector(4, S32), PackedRegs).getReg(0);
return B.buildBuildVector(LLT::fixed_vector(4, S32), PackedRegs)
.getReg(0);
}
llvm_unreachable("invalid data type");
@ -4114,7 +4122,7 @@ static void packImage16bitOpsToDwords(MachineIRBuilder &B, MachineInstr &MI,
const AMDGPU::ImageDimIntrinsicInfo *Intr,
bool IsA16, bool IsG16) {
const LLT S16 = LLT::scalar(16);
const LLT V2S16 = LLT::vector(2, 16);
const LLT V2S16 = LLT::fixed_vector(2, 16);
auto EndIdx = Intr->VAddrEnd;
for (unsigned I = Intr->VAddrStart; I < EndIdx; I++) {
@ -4182,7 +4190,8 @@ static void convertImageAddrToPacked(MachineIRBuilder &B, MachineInstr &MI,
NumAddrRegs = RoundedNumRegs;
}
auto VAddr = B.buildBuildVector(LLT::vector(NumAddrRegs, 32), AddrRegs);
auto VAddr =
B.buildBuildVector(LLT::fixed_vector(NumAddrRegs, 32), AddrRegs);
MI.getOperand(DimIdx).setReg(VAddr.getReg(0));
}
@ -4223,7 +4232,7 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
MachineRegisterInfo *MRI = B.getMRI();
const LLT S32 = LLT::scalar(32);
const LLT S16 = LLT::scalar(16);
const LLT V2S16 = LLT::vector(2, 16);
const LLT V2S16 = LLT::fixed_vector(2, 16);
unsigned DMask = 0;
@ -4278,7 +4287,7 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
if (BaseOpcode->AtomicX2) {
Register VData1 = MI.getOperand(3).getReg();
// The two values are packed in one register.
LLT PackedTy = LLT::vector(2, Ty);
LLT PackedTy = LLT::fixed_vector(2, Ty);
auto Concat = B.buildBuildVector(PackedTy, {VData0, VData1});
MI.getOperand(2).setReg(Concat.getReg(0));
MI.getOperand(3).setReg(AMDGPU::NoRegister);
@ -4348,7 +4357,7 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
const bool UseNSA = PackedRegs.size() >= 3 && ST.hasNSAEncoding();
if (!UseNSA && PackedRegs.size() > 1) {
LLT PackedAddrTy = LLT::vector(2 * PackedRegs.size(), 16);
LLT PackedAddrTy = LLT::fixed_vector(2 * PackedRegs.size(), 16);
auto Concat = B.buildConcatVectors(PackedAddrTy, PackedRegs);
PackedRegs[0] = Concat.getReg(0);
PackedRegs.resize(1);
@ -4440,14 +4449,14 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
if (IsD16 && ST.hasUnpackedD16VMem()) {
RoundedTy = LLT::scalarOrVector(AdjustedNumElts, 32);
TFETy = LLT::vector(AdjustedNumElts + 1, 32);
TFETy = LLT::fixed_vector(AdjustedNumElts + 1, 32);
RegTy = S32;
} else {
unsigned EltSize = EltTy.getSizeInBits();
unsigned RoundedElts = (AdjustedTy.getSizeInBits() + 31) / 32;
unsigned RoundedSize = 32 * RoundedElts;
RoundedTy = LLT::scalarOrVector(RoundedSize / EltSize, EltSize);
TFETy = LLT::vector(RoundedSize / 32 + 1, S32);
TFETy = LLT::fixed_vector(RoundedSize / 32 + 1, S32);
RegTy = !IsTFE && EltSize == 16 ? V2S16 : S32;
}
@ -4561,10 +4570,10 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
const int RegsToCover = (Ty.getSizeInBits() + 31) / 32;
// Deal with the one annoying legal case.
const LLT V3S16 = LLT::vector(3, 16);
const LLT V3S16 = LLT::fixed_vector(3, 16);
if (Ty == V3S16) {
padWithUndef(ResTy, RegsToCover - ResultRegs.size() + 1);
auto Concat = B.buildConcatVectors(LLT::vector(6, 16), ResultRegs);
auto Concat = B.buildConcatVectors(LLT::fixed_vector(6, 16), ResultRegs);
B.buildUnmerge({DstReg, MRI->createGenericVirtualRegister(V3S16)}, Concat);
return true;
}


@ -130,7 +130,7 @@ void AMDGPUPreLegalizerCombinerHelper::applyClampI64ToI16(
assert(MI.getOpcode() != AMDGPU::G_AMDGPU_CVT_PK_I16_I32);
const LLT V2S16 = LLT::vector(2, 16);
const LLT V2S16 = LLT::fixed_vector(2, 16);
auto CvtPk =
B.buildInstr(AMDGPU::G_AMDGPU_CVT_PK_I16_I32, {V2S16},
{Unmerge.getReg(0), Unmerge.getReg(1)}, MI.getFlags());


@ -1133,7 +1133,7 @@ static LLT widen96To128(LLT Ty) {
LLT EltTy = Ty.getElementType();
assert(128 % EltTy.getSizeInBits() == 0);
return LLT::vector(128 / EltTy.getSizeInBits(), EltTy);
return LLT::fixed_vector(128 / EltTy.getSizeInBits(), EltTy);
}
bool AMDGPURegisterBankInfo::applyMappingLoad(MachineInstr &MI,
@ -1663,7 +1663,7 @@ Register AMDGPURegisterBankInfo::handleD16VData(MachineIRBuilder &B,
const LLT S32 = LLT::scalar(32);
int NumElts = StoreVT.getNumElements();
return B.buildMerge(LLT::vector(NumElts, S32), WideRegs).getReg(0);
return B.buildMerge(LLT::fixed_vector(NumElts, S32), WideRegs).getReg(0);
}
static std::pair<Register, unsigned>
@ -2067,7 +2067,7 @@ bool AMDGPURegisterBankInfo::foldInsertEltToCmpSelect(
}
}
LLT MergeTy = LLT::vector(Ops.size(), EltTy);
LLT MergeTy = LLT::fixed_vector(Ops.size(), EltTy);
if (MergeTy == MRI.getType(MI.getOperand(0).getReg())) {
B.buildBuildVector(MI.getOperand(0), Ops);
} else {
@ -2357,7 +2357,7 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
// 16-bit operations are VALU only, but can be promoted to 32-bit SALU.
// Packed 16-bit operations need to be scalarized and promoted.
if (DstTy != LLT::scalar(16) && DstTy != LLT::vector(2, 16))
if (DstTy != LLT::scalar(16) && DstTy != LLT::fixed_vector(2, 16))
break;
const RegisterBank *DstBank =
@ -2550,7 +2550,7 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
case AMDGPU::G_BUILD_VECTOR_TRUNC: {
Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
if (DstTy != LLT::vector(2, 16))
if (DstTy != LLT::fixed_vector(2, 16))
break;
assert(MI.getNumOperands() == 3 && OpdMapper.getVRegs(0).empty());
@ -2682,7 +2682,7 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
assert(DstTy.getSizeInBits() == 64);
LLT Vec32 = LLT::vector(2 * SrcTy.getNumElements(), 32);
LLT Vec32 = LLT::fixed_vector(2 * SrcTy.getNumElements(), 32);
auto CastSrc = B.buildBitcast(Vec32, SrcReg);
auto One = B.buildConstant(S32, 1);
@ -2799,7 +2799,7 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
assert(InsTy.getSizeInBits() == 64);
const LLT S32 = LLT::scalar(32);
LLT Vec32 = LLT::vector(2 * VecTy.getNumElements(), 32);
LLT Vec32 = LLT::fixed_vector(2 * VecTy.getNumElements(), 32);
MachineIRBuilder B(MI);
auto CastSrc = B.buildBitcast(Vec32, SrcReg);
@ -3629,7 +3629,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case AMDGPU::G_BUILD_VECTOR:
case AMDGPU::G_BUILD_VECTOR_TRUNC: {
LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
if (DstTy == LLT::vector(2, 16)) {
if (DstTy == LLT::fixed_vector(2, 16)) {
unsigned DstSize = DstTy.getSizeInBits();
unsigned SrcSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
unsigned Src0BankID = getRegBankID(MI.getOperand(1).getReg(), MRI);


@ -69,10 +69,10 @@ MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
const LLT s1 = LLT::scalar(1);
const LLT s32 = LLT::scalar(32);
const LLT s64 = LLT::scalar(64);
const LLT v16s8 = LLT::vector(16, 8);
const LLT v8s16 = LLT::vector(8, 16);
const LLT v4s32 = LLT::vector(4, 32);
const LLT v2s64 = LLT::vector(2, 64);
const LLT v16s8 = LLT::fixed_vector(16, 8);
const LLT v8s16 = LLT::fixed_vector(8, 16);
const LLT v4s32 = LLT::fixed_vector(4, 32);
const LLT v2s64 = LLT::fixed_vector(2, 64);
const LLT p0 = LLT::pointer(0, 32);
getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})


@ -294,8 +294,8 @@ void X86LegalizerInfo::setLegalizerInfoSSE1() {
const LLT s32 = LLT::scalar(32);
const LLT s64 = LLT::scalar(64);
const LLT v4s32 = LLT::vector(4, 32);
const LLT v2s64 = LLT::vector(2, 64);
const LLT v4s32 = LLT::fixed_vector(4, 32);
const LLT v2s64 = LLT::fixed_vector(2, 64);
auto &LegacyInfo = getLegacyLegalizerInfo();
@ -327,15 +327,15 @@ void X86LegalizerInfo::setLegalizerInfoSSE2() {
const LLT s32 = LLT::scalar(32);
const LLT s64 = LLT::scalar(64);
const LLT v16s8 = LLT::vector(16, 8);
const LLT v8s16 = LLT::vector(8, 16);
const LLT v4s32 = LLT::vector(4, 32);
const LLT v2s64 = LLT::vector(2, 64);
const LLT v16s8 = LLT::fixed_vector(16, 8);
const LLT v8s16 = LLT::fixed_vector(8, 16);
const LLT v4s32 = LLT::fixed_vector(4, 32);
const LLT v2s64 = LLT::fixed_vector(2, 64);
const LLT v32s8 = LLT::vector(32, 8);
const LLT v16s16 = LLT::vector(16, 16);
const LLT v8s32 = LLT::vector(8, 32);
const LLT v4s64 = LLT::vector(4, 64);
const LLT v32s8 = LLT::fixed_vector(32, 8);
const LLT v16s16 = LLT::fixed_vector(16, 16);
const LLT v8s32 = LLT::fixed_vector(8, 32);
const LLT v4s64 = LLT::fixed_vector(4, 64);
auto &LegacyInfo = getLegacyLegalizerInfo();
@ -377,7 +377,7 @@ void X86LegalizerInfo::setLegalizerInfoSSE41() {
if (!Subtarget.hasSSE41())
return;
const LLT v4s32 = LLT::vector(4, 32);
const LLT v4s32 = LLT::fixed_vector(4, 32);
auto &LegacyInfo = getLegacyLegalizerInfo();
@ -388,19 +388,19 @@ void X86LegalizerInfo::setLegalizerInfoAVX() {
if (!Subtarget.hasAVX())
return;
const LLT v16s8 = LLT::vector(16, 8);
const LLT v8s16 = LLT::vector(8, 16);
const LLT v4s32 = LLT::vector(4, 32);
const LLT v2s64 = LLT::vector(2, 64);
const LLT v16s8 = LLT::fixed_vector(16, 8);
const LLT v8s16 = LLT::fixed_vector(8, 16);
const LLT v4s32 = LLT::fixed_vector(4, 32);
const LLT v2s64 = LLT::fixed_vector(2, 64);
const LLT v32s8 = LLT::vector(32, 8);
const LLT v64s8 = LLT::vector(64, 8);
const LLT v16s16 = LLT::vector(16, 16);
const LLT v32s16 = LLT::vector(32, 16);
const LLT v8s32 = LLT::vector(8, 32);
const LLT v16s32 = LLT::vector(16, 32);
const LLT v4s64 = LLT::vector(4, 64);
const LLT v8s64 = LLT::vector(8, 64);
const LLT v32s8 = LLT::fixed_vector(32, 8);
const LLT v64s8 = LLT::fixed_vector(64, 8);
const LLT v16s16 = LLT::fixed_vector(16, 16);
const LLT v32s16 = LLT::fixed_vector(32, 16);
const LLT v8s32 = LLT::fixed_vector(8, 32);
const LLT v16s32 = LLT::fixed_vector(16, 32);
const LLT v4s64 = LLT::fixed_vector(4, 64);
const LLT v8s64 = LLT::fixed_vector(8, 64);
auto &LegacyInfo = getLegacyLegalizerInfo();
@ -435,15 +435,15 @@ void X86LegalizerInfo::setLegalizerInfoAVX2() {
if (!Subtarget.hasAVX2())
return;
const LLT v32s8 = LLT::vector(32, 8);
const LLT v16s16 = LLT::vector(16, 16);
const LLT v8s32 = LLT::vector(8, 32);
const LLT v4s64 = LLT::vector(4, 64);
const LLT v32s8 = LLT::fixed_vector(32, 8);
const LLT v16s16 = LLT::fixed_vector(16, 16);
const LLT v8s32 = LLT::fixed_vector(8, 32);
const LLT v4s64 = LLT::fixed_vector(4, 64);
const LLT v64s8 = LLT::vector(64, 8);
const LLT v32s16 = LLT::vector(32, 16);
const LLT v16s32 = LLT::vector(16, 32);
const LLT v8s64 = LLT::vector(8, 64);
const LLT v64s8 = LLT::fixed_vector(64, 8);
const LLT v32s16 = LLT::fixed_vector(32, 16);
const LLT v16s32 = LLT::fixed_vector(16, 32);
const LLT v8s64 = LLT::fixed_vector(8, 64);
auto &LegacyInfo = getLegacyLegalizerInfo();
@ -471,20 +471,20 @@ void X86LegalizerInfo::setLegalizerInfoAVX512() {
if (!Subtarget.hasAVX512())
return;
const LLT v16s8 = LLT::vector(16, 8);
const LLT v8s16 = LLT::vector(8, 16);
const LLT v4s32 = LLT::vector(4, 32);
const LLT v2s64 = LLT::vector(2, 64);
const LLT v16s8 = LLT::fixed_vector(16, 8);
const LLT v8s16 = LLT::fixed_vector(8, 16);
const LLT v4s32 = LLT::fixed_vector(4, 32);
const LLT v2s64 = LLT::fixed_vector(2, 64);
const LLT v32s8 = LLT::vector(32, 8);
const LLT v16s16 = LLT::vector(16, 16);
const LLT v8s32 = LLT::vector(8, 32);
const LLT v4s64 = LLT::vector(4, 64);
const LLT v32s8 = LLT::fixed_vector(32, 8);
const LLT v16s16 = LLT::fixed_vector(16, 16);
const LLT v8s32 = LLT::fixed_vector(8, 32);
const LLT v4s64 = LLT::fixed_vector(4, 64);
const LLT v64s8 = LLT::vector(64, 8);
const LLT v32s16 = LLT::vector(32, 16);
const LLT v16s32 = LLT::vector(16, 32);
const LLT v8s64 = LLT::vector(8, 64);
const LLT v64s8 = LLT::fixed_vector(64, 8);
const LLT v32s16 = LLT::fixed_vector(32, 16);
const LLT v16s32 = LLT::fixed_vector(16, 32);
const LLT v8s64 = LLT::fixed_vector(8, 64);
auto &LegacyInfo = getLegacyLegalizerInfo();
@ -519,7 +519,7 @@ void X86LegalizerInfo::setLegalizerInfoAVX512DQ() {
if (!(Subtarget.hasAVX512() && Subtarget.hasDQI()))
return;
const LLT v8s64 = LLT::vector(8, 64);
const LLT v8s64 = LLT::fixed_vector(8, 64);
auto &LegacyInfo = getLegacyLegalizerInfo();
@ -529,8 +529,8 @@ void X86LegalizerInfo::setLegalizerInfoAVX512DQ() {
if (!Subtarget.hasVLX())
return;
const LLT v2s64 = LLT::vector(2, 64);
const LLT v4s64 = LLT::vector(4, 64);
const LLT v2s64 = LLT::fixed_vector(2, 64);
const LLT v4s64 = LLT::fixed_vector(4, 64);
for (auto Ty : {v2s64, v4s64})
LegacyInfo.setAction({G_MUL, Ty}, LegacyLegalizeActions::Legal);
@ -540,8 +540,8 @@ void X86LegalizerInfo::setLegalizerInfoAVX512BW() {
if (!(Subtarget.hasAVX512() && Subtarget.hasBWI()))
return;
const LLT v64s8 = LLT::vector(64, 8);
const LLT v32s16 = LLT::vector(32, 16);
const LLT v64s8 = LLT::fixed_vector(64, 8);
const LLT v32s16 = LLT::fixed_vector(32, 16);
auto &LegacyInfo = getLegacyLegalizerInfo();
@ -555,8 +555,8 @@ void X86LegalizerInfo::setLegalizerInfoAVX512BW() {
if (!Subtarget.hasVLX())
return;
const LLT v8s16 = LLT::vector(8, 16);
const LLT v16s16 = LLT::vector(16, 16);
const LLT v8s16 = LLT::fixed_vector(8, 16);
const LLT v16s16 = LLT::fixed_vector(16, 16);
for (auto Ty : {v8s16, v16s16})
LegacyInfo.setAction({G_MUL, Ty}, LegacyLegalizeActions::Legal);


@ -59,12 +59,12 @@ TEST_F(AArch64GISelMITest, TestCSE) {
// Make sure buildConstant with a vector type doesn't crash, and the elements
// CSE.
auto Splat0 = CSEB.buildConstant(LLT::vector(2, s32), 0);
auto Splat0 = CSEB.buildConstant(LLT::fixed_vector(2, s32), 0);
EXPECT_EQ(TargetOpcode::G_BUILD_VECTOR, Splat0->getOpcode());
EXPECT_EQ(Splat0.getReg(1), Splat0.getReg(2));
EXPECT_EQ(&*MIBCst, MRI->getVRegDef(Splat0.getReg(1)));
auto FSplat = CSEB.buildFConstant(LLT::vector(2, s32), 1.0);
auto FSplat = CSEB.buildFConstant(LLT::fixed_vector(2, s32), 1.0);
EXPECT_EQ(TargetOpcode::G_BUILD_VECTOR, FSplat->getOpcode());
EXPECT_EQ(FSplat.getReg(1), FSplat.getReg(2));
EXPECT_EQ(&*MIBFP0, MRI->getVRegDef(FSplat.getReg(1)));


@ -20,30 +20,30 @@ static const LLT S64 = LLT::scalar(64);
static const LLT P0 = LLT::pointer(0, 64);
static const LLT P1 = LLT::pointer(1, 32);
static const LLT V2S8 = LLT::vector(2, 8);
static const LLT V4S8 = LLT::vector(4, 8);
static const LLT V8S8 = LLT::vector(8, 8);
static const LLT V2S8 = LLT::fixed_vector(2, 8);
static const LLT V4S8 = LLT::fixed_vector(4, 8);
static const LLT V8S8 = LLT::fixed_vector(8, 8);
static const LLT V2S16 = LLT::vector(2, 16);
static const LLT V3S16 = LLT::vector(3, 16);
static const LLT V4S16 = LLT::vector(4, 16);
static const LLT V2S16 = LLT::fixed_vector(2, 16);
static const LLT V3S16 = LLT::fixed_vector(3, 16);
static const LLT V4S16 = LLT::fixed_vector(4, 16);
static const LLT V2S32 = LLT::vector(2, 32);
static const LLT V3S32 = LLT::vector(3, 32);
static const LLT V4S32 = LLT::vector(4, 32);
static const LLT V6S32 = LLT::vector(6, 32);
static const LLT V2S32 = LLT::fixed_vector(2, 32);
static const LLT V3S32 = LLT::fixed_vector(3, 32);
static const LLT V4S32 = LLT::fixed_vector(4, 32);
static const LLT V6S32 = LLT::fixed_vector(6, 32);
static const LLT V2S64 = LLT::vector(2, 64);
static const LLT V3S64 = LLT::vector(3, 64);
static const LLT V4S64 = LLT::vector(4, 64);
static const LLT V2S64 = LLT::fixed_vector(2, 64);
static const LLT V3S64 = LLT::fixed_vector(3, 64);
static const LLT V4S64 = LLT::fixed_vector(4, 64);
static const LLT V2P0 = LLT::vector(2, P0);
static const LLT V3P0 = LLT::vector(3, P0);
static const LLT V4P0 = LLT::vector(4, P0);
static const LLT V6P0 = LLT::vector(6, P0);
static const LLT V2P0 = LLT::fixed_vector(2, P0);
static const LLT V3P0 = LLT::fixed_vector(3, P0);
static const LLT V4P0 = LLT::fixed_vector(4, P0);
static const LLT V6P0 = LLT::fixed_vector(6, P0);
static const LLT V2P1 = LLT::vector(2, P1);
static const LLT V4P1 = LLT::vector(4, P1);
static const LLT V2P1 = LLT::fixed_vector(2, P1);
static const LLT V4P1 = LLT::fixed_vector(4, P1);
TEST(GISelUtilsTest, getGCDType) {
EXPECT_EQ(S1, getGCDType(S1, S1));
@ -118,14 +118,17 @@ TEST(GISelUtilsTest, getGCDType) {
EXPECT_EQ(S32, getGCDType(V2S32, V4S8));
// Test cases where neither element type nicely divides.
EXPECT_EQ(LLT::scalar(3), getGCDType(LLT::vector(3, 5), LLT::vector(2, 6)));
EXPECT_EQ(LLT::scalar(3), getGCDType(LLT::vector(2, 6), LLT::vector(3, 5)));
EXPECT_EQ(LLT::scalar(3),
getGCDType(LLT::fixed_vector(3, 5), LLT::fixed_vector(2, 6)));
EXPECT_EQ(LLT::scalar(3),
getGCDType(LLT::fixed_vector(2, 6), LLT::fixed_vector(3, 5)));
// Have to go smaller than a pointer element.
EXPECT_EQ(LLT::scalar(3), getGCDType(LLT::vector(2, LLT::pointer(3, 6)),
LLT::vector(3, 5)));
EXPECT_EQ(LLT::scalar(3), getGCDType(LLT::vector(3, 5),
LLT::vector(2, LLT::pointer(3, 6))));
EXPECT_EQ(LLT::scalar(3), getGCDType(LLT::fixed_vector(2, LLT::pointer(3, 6)),
LLT::fixed_vector(3, 5)));
EXPECT_EQ(LLT::scalar(3),
getGCDType(LLT::fixed_vector(3, 5),
LLT::fixed_vector(2, LLT::pointer(3, 6))));
EXPECT_EQ(V4S8, getGCDType(V4S8, S32));
EXPECT_EQ(S32, getGCDType(S32, V4S8));
@ -135,18 +138,19 @@ TEST(GISelUtilsTest, getGCDType) {
EXPECT_EQ(V2S8, getGCDType(V2S8, V4S16));
EXPECT_EQ(S16, getGCDType(V4S16, V2S8));
EXPECT_EQ(S8, getGCDType(V2S8, LLT::vector(4, 2)));
EXPECT_EQ(LLT::vector(4, 2), getGCDType(LLT::vector(4, 2), S8));
EXPECT_EQ(S8, getGCDType(V2S8, LLT::fixed_vector(4, 2)));
EXPECT_EQ(LLT::fixed_vector(4, 2), getGCDType(LLT::fixed_vector(4, 2), S8));
EXPECT_EQ(LLT::pointer(4, 8),
getGCDType(LLT::fixed_vector(2, LLT::pointer(4, 8)),
LLT::fixed_vector(4, 2)));
EXPECT_EQ(LLT::pointer(4, 8), getGCDType(LLT::vector(2, LLT::pointer(4, 8)),
LLT::vector(4, 2)));
EXPECT_EQ(LLT::fixed_vector(4, 2),
getGCDType(LLT::fixed_vector(4, 2),
LLT::fixed_vector(2, LLT::pointer(4, 8))));
EXPECT_EQ(LLT::vector(4, 2), getGCDType(LLT::vector(4, 2),
LLT::vector(2, LLT::pointer(4, 8))));
EXPECT_EQ(LLT::scalar(4), getGCDType(LLT::vector(3, 4), S8));
EXPECT_EQ(LLT::scalar(4), getGCDType(S8, LLT::vector(3, 4)));
EXPECT_EQ(LLT::scalar(4), getGCDType(LLT::fixed_vector(3, 4), S8));
EXPECT_EQ(LLT::scalar(4), getGCDType(S8, LLT::fixed_vector(3, 4)));
}
TEST(GISelUtilsTest, getLCMType) {
@ -178,8 +182,8 @@ TEST(GISelUtilsTest, getLCMType) {
EXPECT_EQ(V2S32, getLCMType(V2S32, V2S32));
EXPECT_EQ(V6S32, getLCMType(V2S32, V3S32));
EXPECT_EQ(V6S32, getLCMType(V3S32, V2S32));
EXPECT_EQ(LLT::vector(12, S32), getLCMType(V4S32, V3S32));
EXPECT_EQ(LLT::vector(12, S32), getLCMType(V3S32, V4S32));
EXPECT_EQ(LLT::fixed_vector(12, S32), getLCMType(V4S32, V3S32));
EXPECT_EQ(LLT::fixed_vector(12, S32), getLCMType(V3S32, V4S32));
EXPECT_EQ(V2P0, getLCMType(V2P0, V2P0));
EXPECT_EQ(V2P0, getLCMType(V2P0, P0));
@ -187,14 +191,14 @@ TEST(GISelUtilsTest, getLCMType) {
EXPECT_EQ(V2P0, getLCMType(V2P0, V2P0));
EXPECT_EQ(V6P0, getLCMType(V2P0, V3P0));
EXPECT_EQ(V6P0, getLCMType(V3P0, V2P0));
EXPECT_EQ(LLT::vector(12, P0), getLCMType(V4P0, V3P0));
EXPECT_EQ(LLT::vector(12, P0), getLCMType(V3P0, V4P0));
EXPECT_EQ(LLT::fixed_vector(12, P0), getLCMType(V4P0, V3P0));
EXPECT_EQ(LLT::fixed_vector(12, P0), getLCMType(V3P0, V4P0));
EXPECT_EQ(LLT::vector(12, S64), getLCMType(V4S64, V3P0));
EXPECT_EQ(LLT::vector(12, P0), getLCMType(V3P0, V4S64));
EXPECT_EQ(LLT::fixed_vector(12, S64), getLCMType(V4S64, V3P0));
EXPECT_EQ(LLT::fixed_vector(12, P0), getLCMType(V3P0, V4S64));
EXPECT_EQ(LLT::vector(12, P0), getLCMType(V4P0, V3S64));
EXPECT_EQ(LLT::vector(12, S64), getLCMType(V3S64, V4P0));
EXPECT_EQ(LLT::fixed_vector(12, P0), getLCMType(V4P0, V3S64));
EXPECT_EQ(LLT::fixed_vector(12, S64), getLCMType(V3S64, V4P0));
EXPECT_EQ(V2P0, getLCMType(V2P0, S32));
EXPECT_EQ(V4S32, getLCMType(S32, V2P0));
@ -221,18 +225,18 @@ TEST(GISelUtilsTest, getLCMType) {
EXPECT_EQ(V2S16, getLCMType(V2S16, V4S8));
EXPECT_EQ(V4S8, getLCMType(V4S8, V2S16));
EXPECT_EQ(LLT::vector(6, S16), getLCMType(V3S16, V4S8));
EXPECT_EQ(LLT::vector(12, S8), getLCMType(V4S8, V3S16));
EXPECT_EQ(LLT::fixed_vector(6, S16), getLCMType(V3S16, V4S8));
EXPECT_EQ(LLT::fixed_vector(12, S8), getLCMType(V4S8, V3S16));
EXPECT_EQ(V4S16, getLCMType(V4S16, V4S8));
EXPECT_EQ(V8S8, getLCMType(V4S8, V4S16));
EXPECT_EQ(LLT::vector(6, 4), getLCMType(LLT::vector(3, 4), S8));
EXPECT_EQ(LLT::vector(3, 8), getLCMType(S8, LLT::vector(3, 4)));
EXPECT_EQ(LLT::fixed_vector(6, 4), getLCMType(LLT::fixed_vector(3, 4), S8));
EXPECT_EQ(LLT::fixed_vector(3, 8), getLCMType(S8, LLT::fixed_vector(3, 4)));
EXPECT_EQ(LLT::vector(6, 4),
getLCMType(LLT::vector(3, 4), LLT::pointer(4, 8)));
EXPECT_EQ(LLT::vector(3, LLT::pointer(4, 8)),
getLCMType(LLT::pointer(4, 8), LLT::vector(3, 4)));
EXPECT_EQ(LLT::fixed_vector(6, 4),
getLCMType(LLT::fixed_vector(3, 4), LLT::pointer(4, 8)));
EXPECT_EQ(LLT::fixed_vector(3, LLT::pointer(4, 8)),
getLCMType(LLT::pointer(4, 8), LLT::fixed_vector(3, 4)));
EXPECT_EQ(V2S64, getLCMType(V2S64, P0));
EXPECT_EQ(V2P0, getLCMType(P0, V2S64));


@ -614,7 +614,7 @@ TEST_F(AArch64GISelMITest, TestVectorSignBitIsZero) {
if (!TM)
return;
const LLT V2S32 = LLT::vector(2, 32);
const LLT V2S32 = LLT::fixed_vector(2, 32);
// Vector buildConstant makes splat G_BUILD_VECTOR instruction.
auto SignBit = B.buildConstant(V2S32, 0x80000000);
auto Zero = B.buildConstant(V2S32, 0);


@ -145,7 +145,7 @@ TEST_F(AArch64GISelMITest, LowerRotatesVector) {
getActionDefinitionsBuilder({G_ROTR, G_ROTL}).lower(); });
LLT S32 = LLT::scalar(32);
LLT V4S32 = LLT::vector(4, S32);
LLT V4S32 = LLT::fixed_vector(4, S32);
auto SrcTrunc = B.buildTrunc(S32, Copies[0]);
auto Src = B.buildSplatVector(V4S32, SrcTrunc);
auto AmtTrunc = B.buildTrunc(S32, Copies[1]);
@ -1346,8 +1346,8 @@ TEST_F(AArch64GISelMITest, FewerElementsAnd) {
if (!TM)
return;
const LLT V2S32 = LLT::vector(2, 32);
const LLT V5S32 = LLT::vector(5, 32);
const LLT V2S32 = LLT::fixed_vector(2, 32);
const LLT V5S32 = LLT::fixed_vector(5, 32);
// Declare your legalization info
DefineLegalizerInfo(A, {
@ -1402,8 +1402,8 @@ TEST_F(AArch64GISelMITest, MoreElementsAnd) {
return;
LLT s32 = LLT::scalar(32);
LLT v2s32 = LLT::vector(2, 32);
LLT v6s32 = LLT::vector(6, 32);
LLT v2s32 = LLT::fixed_vector(2, 32);
LLT v6s32 = LLT::fixed_vector(6, 32);
LegalizerInfo LI;
LI.getActionDefinitionsBuilder(TargetOpcode::G_AND)
@ -1447,8 +1447,8 @@ TEST_F(AArch64GISelMITest, FewerElementsPhi) {
LLT s1 = LLT::scalar(1);
LLT s32 = LLT::scalar(32);
LLT s64 = LLT::scalar(64);
LLT v2s32 = LLT::vector(2, 32);
LLT v5s32 = LLT::vector(5, 32);
LLT v2s32 = LLT::fixed_vector(2, 32);
LLT v5s32 = LLT::fixed_vector(5, 32);
LegalizerInfo LI;
LI.getActionDefinitionsBuilder(TargetOpcode::G_PHI)
@ -1590,11 +1590,11 @@ TEST_F(AArch64GISelMITest, LowerMinMax) {
return;
LLT s64 = LLT::scalar(64);
LLT v2s32 = LLT::vector(2, 32);
LLT v2s32 = LLT::fixed_vector(2, 32);
DefineLegalizerInfo(A, {
getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
.lowerFor({s64, LLT::vector(2, s32)});
.lowerFor({s64, LLT::fixed_vector(2, s32)});
});
auto SMin = B.buildSMin(s64, Copies[0], Copies[1]);
@ -1678,12 +1678,12 @@ TEST_F(AArch64GISelMITest, WidenScalarBuildVector) {
LLT S32 = LLT::scalar(32);
LLT S16 = LLT::scalar(16);
LLT V2S16 = LLT::vector(2, S16);
LLT V2S32 = LLT::vector(2, S32);
LLT V2S16 = LLT::fixed_vector(2, S16);
LLT V2S32 = LLT::fixed_vector(2, S32);
DefineLegalizerInfo(A, {
getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
.lowerFor({s64, LLT::vector(2, s32)});
.lowerFor({s64, LLT::fixed_vector(2, s32)});
});
AInfo Info(MF->getSubtarget());
@ -3122,7 +3122,7 @@ TEST_F(AArch64GISelMITest, LowerInsert) {
LLT S64{LLT::scalar(64)};
LLT P0{LLT::pointer(0, 64)};
LLT P1{LLT::pointer(1, 32)};
LLT V2S32{LLT::vector(2, 32)};
LLT V2S32{LLT::fixed_vector(2, 32)};
auto TruncS32 = B.buildTrunc(S32, Copies[0]);
auto IntToPtrP0 = B.buildIntToPtr(P0, Copies[0]);
@ -3251,8 +3251,8 @@ TEST_F(AArch64GISelMITest, LowerBSWAP) {
DefineLegalizerInfo(A, {});
// Make sure vector lowering doesn't assert.
auto Cast = B.buildBitcast(LLT::vector(2, 32), Copies[0]);
auto BSwap = B.buildBSwap(LLT::vector(2, 32), Cast);
auto Cast = B.buildBitcast(LLT::fixed_vector(2, 32), Copies[0]);
auto BSwap = B.buildBSwap(LLT::fixed_vector(2, 32), Cast);
AInfo Info(MF->getSubtarget());
DummyGISelObserver Observer;
LegalizerHelper Helper(*MF, Info, Observer, B);
@ -3402,7 +3402,7 @@ TEST_F(AArch64GISelMITest, BitcastLoad) {
LLT P0 = LLT::pointer(0, 64);
LLT S32 = LLT::scalar(32);
LLT V4S8 = LLT::vector(4, 8);
LLT V4S8 = LLT::fixed_vector(4, 8);
auto Ptr = B.buildUndef(P0);
DefineLegalizerInfo(A, {});
@ -3436,7 +3436,7 @@ TEST_F(AArch64GISelMITest, BitcastStore) {
LLT P0 = LLT::pointer(0, 64);
LLT S32 = LLT::scalar(32);
LLT V4S8 = LLT::vector(4, 8);
LLT V4S8 = LLT::fixed_vector(4, 8);
auto Ptr = B.buildUndef(P0);
DefineLegalizerInfo(A, {});
@ -3470,7 +3470,7 @@ TEST_F(AArch64GISelMITest, BitcastSelect) {
LLT S1 = LLT::scalar(1);
LLT S32 = LLT::scalar(32);
LLT V4S8 = LLT::vector(4, 8);
LLT V4S8 = LLT::fixed_vector(4, 8);
DefineLegalizerInfo(A, {});
@ -3500,7 +3500,7 @@ TEST_F(AArch64GISelMITest, BitcastSelect) {
EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
// Bitcasting a select with a vector condition doesn't make sense.
auto VCond = B.buildUndef(LLT::vector(4, 1));
auto VCond = B.buildUndef(LLT::fixed_vector(4, 1));
auto VSelect = B.buildSelect(V4S8, VCond, Val0, Val1);
B.setInsertPt(*EntryMBB, VSelect->getIterator());
@ -3516,7 +3516,7 @@ TEST_F(AArch64GISelMITest, BitcastBitOps) {
return;
LLT S32 = LLT::scalar(32);
LLT V4S8 = LLT::vector(4, 8);
LLT V4S8 = LLT::fixed_vector(4, 8);
DefineLegalizerInfo(A, {});
@ -3601,7 +3601,7 @@ TEST_F(AArch64GISelMITest, NarrowImplicitDef) {
LLT S32{LLT::scalar(32)};
LLT S48{LLT::scalar(48)};
LLT S64{LLT::scalar(64)};
LLT V2S64{{LLT::vector(2, 64)}};
LLT V2S64{{LLT::fixed_vector(2, 64)}};
auto Implicit1 = B.buildUndef(S64);
auto Implicit2 = B.buildUndef(S64);
@ -3663,8 +3663,8 @@ TEST_F(AArch64GISelMITest, WidenFreeze) {
// Make sure that G_FREEZE is widened with anyext
LLT S64{LLT::scalar(64)};
LLT S128{LLT::scalar(128)};
LLT V2S32{LLT::vector(2, 32)};
LLT V2S64{LLT::vector(2, 64)};
LLT V2S32{LLT::fixed_vector(2, 32)};
LLT V2S64{LLT::fixed_vector(2, 64)};
auto Vector = B.buildBitcast(V2S32, Copies[0]);
@ -3715,8 +3715,8 @@ TEST_F(AArch64GISelMITest, NarrowFreeze) {
LLT S32{LLT::scalar(32)};
LLT S33{LLT::scalar(33)};
LLT S64{LLT::scalar(64)};
LLT V2S16{LLT::vector(2, 16)};
LLT V2S32{LLT::vector(2, 32)};
LLT V2S16{LLT::fixed_vector(2, 16)};
LLT V2S32{LLT::fixed_vector(2, 32)};
auto Trunc = B.buildTrunc(S33, {Copies[0]});
auto Vector = B.buildBitcast(V2S32, Copies[0]);
@ -3799,9 +3799,9 @@ TEST_F(AArch64GISelMITest, FewerElementsFreeze) {
DefineLegalizerInfo(A, {});
LLT S32{LLT::scalar(32)};
LLT V2S16{LLT::vector(2, 16)};
LLT V2S32{LLT::vector(2, 32)};
LLT V4S16{LLT::vector(4, 16)};
LLT V2S16{LLT::fixed_vector(2, 16)};
LLT V2S32{LLT::fixed_vector(2, 32)};
LLT V4S16{LLT::fixed_vector(4, 16)};
auto Vector1 = B.buildBitcast(V2S32, Copies[0]);
auto Vector2 = B.buildBitcast(V4S16, Copies[0]);
@ -3851,8 +3851,8 @@ TEST_F(AArch64GISelMITest, MoreElementsFreeze) {
DefineLegalizerInfo(A, {});
LLT V2S32{LLT::vector(2, 32)};
LLT V4S32{LLT::vector(4, 32)};
LLT V2S32{LLT::fixed_vector(2, 32)};
LLT V4S32{LLT::fixed_vector(4, 32)};
auto Vector1 = B.buildBitcast(V2S32, Copies[0]);
auto FreezeVector1 = B.buildInstr(TargetOpcode::G_FREEZE, {V2S32}, {Vector1});
@ -3890,9 +3890,9 @@ TEST_F(AArch64GISelMITest, FewerElementsInsertVectorElt) {
LLT P0{LLT::pointer(0, 64)};
LLT S64{LLT::scalar(64)};
LLT S16{LLT::scalar(16)};
LLT V2S16{LLT::vector(2, 16)};
LLT V3S16{LLT::vector(3, 16)};
LLT V8S16{LLT::vector(8, 16)};
LLT V2S16{LLT::fixed_vector(2, 16)};
LLT V3S16{LLT::fixed_vector(3, 16)};
LLT V8S16{LLT::fixed_vector(8, 16)};
auto Ptr0 = B.buildIntToPtr(P0, Copies[0]);
auto VectorV8 = B.buildLoad(V8S16, Ptr0, MachinePointerInfo(), Align(8));
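// A small illustrative sketch (assuming the same headers as the earlier
// checkVectorFactories sketch): the fixed-width vector types these helper
// tests construct answer the usual LLT queries with no scalable component.
static void inspectFixedVector() {
  LLT V8S16 = LLT::fixed_vector(8, 16);
  assert(V8S16.isVector() && !V8S16.isScalable());
  assert(V8S16.getElementCount() == ElementCount::getFixed(8));
  assert(V8S16.getScalarSizeInBits() == 16u);
  assert(V8S16.getElementType() == LLT::scalar(16));
}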

View File

@ -93,17 +93,17 @@ TEST(LegalizerInfoTest, VectorRISC) {
LegalizerInfo L;
auto &LegacyInfo = L.getLegacyLegalizerInfo();
// Typical RISCy set of operations based on ARM.
LegacyInfo.setAction({G_ADD, LLT::vector(8, 8)},
LegacyInfo.setAction({G_ADD, LLT::fixed_vector(8, 8)},
LegacyLegalizeActions::Legal);
LegacyInfo.setAction({G_ADD, LLT::vector(16, 8)},
LegacyInfo.setAction({G_ADD, LLT::fixed_vector(16, 8)},
LegacyLegalizeActions::Legal);
LegacyInfo.setAction({G_ADD, LLT::vector(4, 16)},
LegacyInfo.setAction({G_ADD, LLT::fixed_vector(4, 16)},
LegacyLegalizeActions::Legal);
LegacyInfo.setAction({G_ADD, LLT::vector(8, 16)},
LegacyInfo.setAction({G_ADD, LLT::fixed_vector(8, 16)},
LegacyLegalizeActions::Legal);
LegacyInfo.setAction({G_ADD, LLT::vector(2, 32)},
LegacyInfo.setAction({G_ADD, LLT::fixed_vector(2, 32)},
LegacyLegalizeActions::Legal);
LegacyInfo.setAction({G_ADD, LLT::vector(4, 32)},
LegacyInfo.setAction({G_ADD, LLT::fixed_vector(4, 32)},
LegacyLegalizeActions::Legal);
LegacyInfo.setLegalizeVectorElementToDifferentSizeStrategy(
@ -116,19 +116,19 @@ TEST(LegalizerInfoTest, VectorRISC) {
// Check we infer the correct types and actually do what we're told for some
// simple cases.
EXPECT_EQ(L.getAction({G_ADD, {LLT::vector(8, 8)}}),
EXPECT_EQ(L.getAction({G_ADD, {LLT::fixed_vector(8, 8)}}),
LegalizeActionStep(Legal, 0, LLT{}));
EXPECT_EQ(L.getAction({G_ADD, {LLT::vector(8, 7)}}),
LegalizeActionStep(WidenScalar, 0, LLT::vector(8, 8)));
EXPECT_EQ(L.getAction({G_ADD, {LLT::vector(2, 8)}}),
LegalizeActionStep(MoreElements, 0, LLT::vector(8, 8)));
EXPECT_EQ(L.getAction({G_ADD, {LLT::vector(8, 32)}}),
LegalizeActionStep(FewerElements, 0, LLT::vector(4, 32)));
EXPECT_EQ(L.getAction({G_ADD, {LLT::fixed_vector(8, 7)}}),
LegalizeActionStep(WidenScalar, 0, LLT::fixed_vector(8, 8)));
EXPECT_EQ(L.getAction({G_ADD, {LLT::fixed_vector(2, 8)}}),
LegalizeActionStep(MoreElements, 0, LLT::fixed_vector(8, 8)));
EXPECT_EQ(L.getAction({G_ADD, {LLT::fixed_vector(8, 32)}}),
LegalizeActionStep(FewerElements, 0, LLT::fixed_vector(4, 32)));
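// Each LegalizeActionStep above reads as (Action, type index, resulting type):
// v8s7 has its scalar element widened to give v8s8, v2s8 is padded with more
// elements to v8s8, and v8s32 is split to the legal v4s32.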
// Check a few non-power-of-2 sizes:
EXPECT_EQ(L.getAction({G_ADD, {LLT::vector(3, 3)}}),
LegalizeActionStep(WidenScalar, 0, LLT::vector(3, 8)));
EXPECT_EQ(L.getAction({G_ADD, {LLT::vector(3, 8)}}),
LegalizeActionStep(MoreElements, 0, LLT::vector(8, 8)));
EXPECT_EQ(L.getAction({G_ADD, {LLT::fixed_vector(3, 3)}}),
LegalizeActionStep(WidenScalar, 0, LLT::fixed_vector(3, 8)));
EXPECT_EQ(L.getAction({G_ADD, {LLT::fixed_vector(3, 8)}}),
LegalizeActionStep(MoreElements, 0, LLT::fixed_vector(8, 8)));
}
TEST(LegalizerInfoTest, MultipleTypes) {
@ -228,18 +228,18 @@ TEST(LegalizerInfoTest, RuleSets) {
const LLT s33 = LLT::scalar(33);
const LLT s64 = LLT::scalar(64);
const LLT v2s5 = LLT::vector(2, 5);
const LLT v2s8 = LLT::vector(2, 8);
const LLT v2s16 = LLT::vector(2, 16);
const LLT v2s32 = LLT::vector(2, 32);
const LLT v3s32 = LLT::vector(3, 32);
const LLT v4s32 = LLT::vector(4, 32);
const LLT v2s33 = LLT::vector(2, 33);
const LLT v2s64 = LLT::vector(2, 64);
const LLT v2s5 = LLT::fixed_vector(2, 5);
const LLT v2s8 = LLT::fixed_vector(2, 8);
const LLT v2s16 = LLT::fixed_vector(2, 16);
const LLT v2s32 = LLT::fixed_vector(2, 32);
const LLT v3s32 = LLT::fixed_vector(3, 32);
const LLT v4s32 = LLT::fixed_vector(4, 32);
const LLT v2s33 = LLT::fixed_vector(2, 33);
const LLT v2s64 = LLT::fixed_vector(2, 64);
const LLT p0 = LLT::pointer(0, 32);
const LLT v3p0 = LLT::vector(3, p0);
const LLT v4p0 = LLT::vector(4, p0);
const LLT v3p0 = LLT::fixed_vector(3, p0);
const LLT v4p0 = LLT::fixed_vector(4, p0);
{
LegalizerInfo LI;
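// Illustrative only (same assumed headers as the earlier sketches): a fixed
// vector of pointers such as v3p0/v4p0 above keeps the address space and
// width of its pointer element.
static void inspectPointerVector() {
  LLT P0 = LLT::pointer(0, 32);
  LLT V3P0 = LLT::fixed_vector(3, P0);
  assert(V3P0.isVector() && V3P0.getElementType().isPointer());
  assert(V3P0.getElementType().getAddressSpace() == 0u);
  assert(V3P0.getScalarSizeInBits() == 32u);
}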

View File

@ -31,8 +31,8 @@ namespace {
DefineLegalizerInfo(ALegalizer, {
auto p0 = LLT::pointer(0, 64);
auto v2s8 = LLT::vector(2, 8);
auto v2s16 = LLT::vector(2, 16);
auto v2s8 = LLT::fixed_vector(2, 8);
auto v2s16 = LLT::fixed_vector(2, 16);
getActionDefinitionsBuilder(G_LOAD)
.legalForTypesWithMemDesc({{s16, p0, 8, 8}})
.scalarize(0)

View File

@ -17,8 +17,8 @@ TEST_F(AArch64GISelMITest, TestBuildConstantFConstant) {
B.buildConstant(LLT::scalar(32), 42);
B.buildFConstant(LLT::scalar(32), 1.0);
B.buildConstant(LLT::vector(2, 32), 99);
B.buildFConstant(LLT::vector(2, 32), 2.0);
B.buildConstant(LLT::fixed_vector(2, 32), 99);
B.buildFConstant(LLT::fixed_vector(2, 32), 2.0);
// Test APFloat overload.
APFloat KVal(APFloat::IEEEdouble(), "4.0");
@ -51,21 +51,21 @@ TEST_F(AArch64GISelMITest, TestBuildConstantFConstantDeath) {
// Test APInt version breaks
EXPECT_DEATH(B.buildConstant(LLT::scalar(16), APV32),
"creating constant with the wrong size");
EXPECT_DEATH(B.buildConstant(LLT::vector(2, 16), APV32),
EXPECT_DEATH(B.buildConstant(LLT::fixed_vector(2, 16), APV32),
"creating constant with the wrong size");
// Test ConstantInt version breaks
ConstantInt *CI = ConstantInt::get(Ctx, APV32);
EXPECT_DEATH(B.buildConstant(LLT::scalar(16), *CI),
"creating constant with the wrong size");
EXPECT_DEATH(B.buildConstant(LLT::vector(2, 16), *CI),
EXPECT_DEATH(B.buildConstant(LLT::fixed_vector(2, 16), *CI),
"creating constant with the wrong size");
APFloat DoubleVal(APFloat::IEEEdouble());
ConstantFP *CF = ConstantFP::get(Ctx, DoubleVal);
EXPECT_DEATH(B.buildFConstant(LLT::scalar(16), *CF),
"creating fconstant with the wrong size");
EXPECT_DEATH(B.buildFConstant(LLT::vector(2, 16), *CF),
EXPECT_DEATH(B.buildFConstant(LLT::fixed_vector(2, 16), *CF),
"creating fconstant with the wrong size");
}
@ -339,13 +339,13 @@ TEST_F(AArch64GISelMITest, BuildMerge) {
// G_MERGE_VALUES.
B.buildMerge(LLT::scalar(128), {RegC0, RegC1, RegC2, RegC3});
// Merging plain constants to a vector should produce a G_BUILD_VECTOR.
LLT V2x32 = LLT::vector(2, 32);
LLT V2x32 = LLT::fixed_vector(2, 32);
Register RegC0C1 =
B.buildMerge(V2x32, {RegC0, RegC1}).getReg(0);
Register RegC2C3 =
B.buildMerge(V2x32, {RegC2, RegC3}).getReg(0);
// Merging vector constants to a vector should produce a G_CONCAT_VECTORS.
B.buildMerge(LLT::vector(4, 32), {RegC0C1, RegC2C3});
B.buildMerge(LLT::fixed_vector(4, 32), {RegC0C1, RegC2C3});
// Merging vector constants to a plain type is not allowed.
// Nothing else to test.

View File

@ -428,7 +428,7 @@ TEST_F(AArch64GISelMITest, MatchSpecificType) {
m_GAdd(m_SpecificType(s64), m_Reg())));
// Try to match the destination type of a bitcast.
LLT v2s32 = LLT::vector(2, 32);
LLT v2s32 = LLT::fixed_vector(2, 32);
auto MIBCast = B.buildCast(v2s32, Copies[0]);
EXPECT_TRUE(
mi_match(MIBCast.getReg(0), *MRI, m_GBitcast(m_Reg())));

View File

@ -58,12 +58,11 @@ TEST(LowLevelTypeTest, Vector) {
ElementCount::getScalable(3), ElementCount::getScalable(4),
ElementCount::getScalable(32), ElementCount::getScalable(0xff)}) {
const LLT STy = LLT::scalar(S);
const LLT VTy = LLT::vector(EC.getKnownMinValue(), S, EC.isScalable());
const LLT VTy = LLT::vector(EC, S);
// Test the alternative vector().
{
const LLT VSTy =
LLT::vector(EC.getKnownMinValue(), STy, EC.isScalable());
const LLT VSTy = LLT::vector(EC, STy);
EXPECT_EQ(VTy, VSTy);
}
@ -102,15 +101,15 @@ TEST(LowLevelTypeTest, Vector) {
TEST(LowLevelTypeTest, ScalarOrVector) {
// Test version with number of bits for scalar type.
EXPECT_EQ(LLT::scalar(32), LLT::scalarOrVector(1, 32));
EXPECT_EQ(LLT::vector(2, 32), LLT::scalarOrVector(2, 32));
EXPECT_EQ(LLT::fixed_vector(2, 32), LLT::scalarOrVector(2, 32));
// Test version with LLT for scalar type.
EXPECT_EQ(LLT::scalar(32), LLT::scalarOrVector(1, LLT::scalar(32)));
EXPECT_EQ(LLT::vector(2, 32), LLT::scalarOrVector(2, LLT::scalar(32)));
EXPECT_EQ(LLT::fixed_vector(2, 32), LLT::scalarOrVector(2, LLT::scalar(32)));
// Test with pointer elements.
EXPECT_EQ(LLT::pointer(1, 32), LLT::scalarOrVector(1, LLT::pointer(1, 32)));
EXPECT_EQ(LLT::vector(2, LLT::pointer(1, 32)),
EXPECT_EQ(LLT::fixed_vector(2, LLT::pointer(1, 32)),
LLT::scalarOrVector(2, LLT::pointer(1, 32)));
}
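// Sketch under the same assumptions as the earlier examples: scalarOrVector
// folds a single element to a plain scalar and otherwise builds a fixed-width
// vector, which is why the expectations above compare against fixed_vector.
static void checkScalarOrVector() {
  assert(LLT::scalarOrVector(1, 32) == LLT::scalar(32));
  assert(LLT::scalarOrVector(4, 32) == LLT::fixed_vector(4, 32));
}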
@ -121,11 +120,11 @@ TEST(LowLevelTypeTest, ChangeElementType) {
const LLT S32 = LLT::scalar(32);
const LLT S64 = LLT::scalar(64);
const LLT V2S32 = LLT::vector(2, 32);
const LLT V2S64 = LLT::vector(2, 64);
const LLT V2S32 = LLT::fixed_vector(2, 32);
const LLT V2S64 = LLT::fixed_vector(2, 64);
const LLT V2P0 = LLT::vector(2, P0);
const LLT V2P1 = LLT::vector(2, P1);
const LLT V2P0 = LLT::fixed_vector(2, P0);
const LLT V2P1 = LLT::fixed_vector(2, P1);
EXPECT_EQ(S64, S32.changeElementType(S64));
EXPECT_EQ(S32, S32.changeElementType(S32));
@ -146,11 +145,11 @@ TEST(LowLevelTypeTest, ChangeElementType) {
EXPECT_EQ(V2S32, V2P0.changeElementType(S32));
// Similar tests for scalable vectors.
const LLT NXV2S32 = LLT::vector(2, 32, true);
const LLT NXV2S64 = LLT::vector(2, 64, true);
const LLT NXV2S32 = LLT::scalable_vector(2, 32);
const LLT NXV2S64 = LLT::scalable_vector(2, 64);
const LLT NXV2P0 = LLT::vector(2, P0, true);
const LLT NXV2P1 = LLT::vector(2, P1, true);
const LLT NXV2P0 = LLT::scalable_vector(2, P0);
const LLT NXV2P1 = LLT::scalable_vector(2, P1);
EXPECT_EQ(NXV2S64, NXV2S32.changeElementType(S64));
EXPECT_EQ(NXV2S32, NXV2S64.changeElementType(S32));
@ -164,12 +163,12 @@ TEST(LowLevelTypeTest, ChangeElementType) {
TEST(LowLevelTypeTest, ChangeNumElements) {
const LLT P0 = LLT::pointer(0, 32);
const LLT V2P0 = LLT::vector(2, P0);
const LLT V3P0 = LLT::vector(3, P0);
const LLT V2P0 = LLT::fixed_vector(2, P0);
const LLT V3P0 = LLT::fixed_vector(3, P0);
const LLT S64 = LLT::scalar(64);
const LLT V2S64 = LLT::vector(2, 64);
const LLT V3S64 = LLT::vector(3, 64);
const LLT V2S64 = LLT::fixed_vector(2, 64);
const LLT V3S64 = LLT::fixed_vector(3, 64);
// Vector to scalar
EXPECT_EQ(S64, V2S64.changeNumElements(1));
@ -191,7 +190,7 @@ TEST(LowLevelTypeTest, ChangeNumElements) {
// Invalid to directly change the element size for pointers.
TEST(LowLevelTypeTest, ChangeElementTypeDeath) {
const LLT P0 = LLT::pointer(0, 32);
const LLT V2P0 = LLT::vector(2, P0);
const LLT V2P0 = LLT::fixed_vector(2, P0);
EXPECT_DEATH(P0.changeElementSize(64),
"invalid to directly change element size for pointers");
@ -222,7 +221,7 @@ TEST(LowLevelTypeTest, Pointer) {
ElementCount::getScalable(3), ElementCount::getScalable(4),
ElementCount::getScalable(256), ElementCount::getScalable(65535)}) {
const LLT Ty = LLT::pointer(AS, DL.getPointerSizeInBits(AS));
const LLT VTy = LLT::vector(EC.getKnownMinValue(), Ty, EC.isScalable());
const LLT VTy = LLT::vector(EC, Ty);
// Test kind.
ASSERT_TRUE(Ty.isValid());
@ -277,14 +276,14 @@ TEST(LowLevelTypeTest, Divide) {
EXPECT_EQ(LLT::scalar(32), LLT::pointer(0, 64).divide(2));
// Test dividing vectors.
EXPECT_EQ(LLT::scalar(32), LLT::vector(2, 32).divide(2));
EXPECT_EQ(LLT::vector(2, 32), LLT::vector(4, 32).divide(2));
EXPECT_EQ(LLT::scalar(32), LLT::fixed_vector(2, 32).divide(2));
EXPECT_EQ(LLT::fixed_vector(2, 32), LLT::fixed_vector(4, 32).divide(2));
// Test vector of pointers
EXPECT_EQ(LLT::pointer(1, 64),
LLT::vector(4, LLT::pointer(1, 64)).divide(4));
EXPECT_EQ(LLT::vector(2, LLT::pointer(1, 64)),
LLT::vector(4, LLT::pointer(1, 64)).divide(2));
LLT::fixed_vector(4, LLT::pointer(1, 64)).divide(4));
EXPECT_EQ(LLT::fixed_vector(2, LLT::pointer(1, 64)),
LLT::fixed_vector(4, LLT::pointer(1, 64)).divide(2));
}
}

View File

@ -138,8 +138,11 @@ public:
return;
}
if (Ty.isVector()) {
OS << "LLT::vector(" << Ty.getElementCount().getKnownMinValue() << ", "
<< Ty.getScalarSizeInBits() << ", " << Ty.isScalable() << ")";
OS << "LLT::vector("
<< (Ty.isScalable() ? "ElementCount::getScalable("
: "ElementCount::getFixed(")
<< Ty.getElementCount().getKnownMinValue() << "), "
<< Ty.getScalarSizeInBits() << ")";
return;
}
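// For illustration only, inferred from the printing code above rather than
// copied from any generated file: a fixed <4 x s32> is now emitted as
//   LLT::vector(ElementCount::getFixed(4), 32)
// and a scalable <vscale x 2 x s64> as
//   LLT::vector(ElementCount::getScalable(2), 64)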
if (Ty.isPointer() && Ty.getSizeInBits() > 0) {
@ -195,9 +198,8 @@ static Optional<LLTCodeGen> MVTToLLT(MVT::SimpleValueType SVT) {
MVT VT(SVT);
if (VT.isVector() && !VT.getVectorElementCount().isScalar())
return LLTCodeGen(LLT::vector(VT.getVectorNumElements(),
VT.getScalarSizeInBits(),
VT.isScalableVector()));
return LLTCodeGen(
LLT::vector(VT.getVectorElementCount(), VT.getScalarSizeInBits()));
if (VT.isInteger() || VT.isFloatingPoint())
return LLTCodeGen(LLT::scalar(VT.getSizeInBits()));