Remove SequentialType from the type hierarchy.

Now that we have scalable vectors, there's a distinction that isn't
getting captured in the original SequentialType: some vectors don't have
a known element count, so counting the number of elements doesn't make
sense.

In some cases, there's a better way to express the commonality using
other methods. If we're dealing with GEPs, there's GEP methods; if we're
dealing with a ConstantDataSequential, we can query its element type
directly.

In the relatively few remaining cases, I just decided to write out
the type checks. We're talking about relatively few places, and I think
the abstraction doesn't really carry its weight. (See thread "[RFC]
Refactor class hierarchy of VectorType in the IR" on llvmdev.)

Differential Revision: https://reviews.llvm.org/D75661
This commit is contained in:
Eli Friedman 2020-04-06 17:03:49 -07:00
parent 8f2d2a7cb4
commit 68b03aee1a
25 changed files with 241 additions and 178 deletions

View File

@ -318,12 +318,17 @@ bool ConstantAggregateBuilder::split(size_t Index, CharUnits Hint) {
CharUnits Offset = Offsets[Index];
if (auto *CA = dyn_cast<llvm::ConstantAggregate>(C)) {
// Expand the sequence into its contained elements.
// FIXME: This assumes vector elements are byte-sized.
replace(Elems, Index, Index + 1,
llvm::map_range(llvm::seq(0u, CA->getNumOperands()),
[&](unsigned Op) { return CA->getOperand(Op); }));
if (auto *Seq = dyn_cast<llvm::SequentialType>(CA->getType())) {
if (isa<llvm::ArrayType>(CA->getType()) ||
isa<llvm::VectorType>(CA->getType())) {
// Array or vector.
CharUnits ElemSize = getSize(Seq->getElementType());
llvm::Type *ElemTy =
llvm::GetElementPtrInst::getTypeAtIndex(CA->getType(), (uint64_t)0);
CharUnits ElemSize = getSize(ElemTy);
replace(
Offsets, Index, Index + 1,
llvm::map_range(llvm::seq(0u, CA->getNumOperands()),
@ -344,6 +349,8 @@ bool ConstantAggregateBuilder::split(size_t Index, CharUnits Hint) {
}
if (auto *CDS = dyn_cast<llvm::ConstantDataSequential>(C)) {
// Expand the sequence into its contained elements.
// FIXME: This assumes vector elements are byte-sized.
// FIXME: If possible, split into two ConstantDataSequentials at Hint.
CharUnits ElemSize = getSize(CDS->getElementType());
replace(Elems, Index, Index + 1,
@ -359,6 +366,7 @@ bool ConstantAggregateBuilder::split(size_t Index, CharUnits Hint) {
}
if (isa<llvm::ConstantAggregateZero>(C)) {
// Split into two zeros at the hinted offset.
CharUnits ElemSize = getSize(C);
assert(Hint > Offset && Hint < Offset + ElemSize && "nothing to split");
replace(Elems, Index, Index + 1,
@ -368,6 +376,7 @@ bool ConstantAggregateBuilder::split(size_t Index, CharUnits Hint) {
}
if (isa<llvm::UndefValue>(C)) {
// Drop undef; it doesn't contribute to the final layout.
replace(Elems, Index, Index + 1, {});
replace(Offsets, Index, Index + 1, {});
return true;

View File

@ -44,7 +44,6 @@ namespace llvm {
class ArrayType;
class IntegerType;
class PointerType;
class SequentialType;
class StructType;
class VectorType;
template <class ConstantClass> struct ConstantAggrKeyType;
@ -631,12 +630,6 @@ public:
/// efficient as getElementAsInteger/Float/Double.
Constant *getElementAsConstant(unsigned i) const;
/// Specialize the getType() method to always return a SequentialType, which
/// reduces the amount of casting needed in parts of the compiler.
inline SequentialType *getType() const {
return cast<SequentialType>(Value::getType());
}
/// Return the element type of the array/vector.
Type *getElementType() const;

View File

@ -354,47 +354,22 @@ Type *Type::getStructElementType(unsigned N) const {
return cast<StructType>(this)->getElementType(N);
}
/// This is the superclass of the array and vector type classes. Both of these
/// represent "arrays" in memory. The array type represents a specifically sized
/// array, and the vector type represents a specifically sized array that allows
/// for use of SIMD instructions. SequentialType holds the common features of
/// both, which stem from the fact that both lay their components out in memory
/// identically.
class SequentialType : public Type {
Type *ContainedType; ///< Storage for the single contained type.
/// Class to represent array types.
class ArrayType : public Type {
/// The element type of the array.
Type *ContainedType;
/// Number of elements in the array.
uint64_t NumElements;
protected:
SequentialType(TypeID TID, Type *ElType, uint64_t NumElements)
: Type(ElType->getContext(), TID), ContainedType(ElType),
NumElements(NumElements) {
ContainedTys = &ContainedType;
NumContainedTys = 1;
}
public:
SequentialType(const SequentialType &) = delete;
SequentialType &operator=(const SequentialType &) = delete;
/// For scalable vectors, this will return the minimum number of elements
/// in the vector.
uint64_t getNumElements() const { return NumElements; }
Type *getElementType() const { return ContainedType; }
/// Methods for support type inquiry through isa, cast, and dyn_cast.
static bool classof(const Type *T) {
return T->getTypeID() == ArrayTyID || T->getTypeID() == VectorTyID;
}
};
/// Class to represent array types.
class ArrayType : public SequentialType {
ArrayType(Type *ElType, uint64_t NumEl);
public:
ArrayType(const ArrayType &) = delete;
ArrayType &operator=(const ArrayType &) = delete;
uint64_t getNumElements() const { return NumElements; }
Type *getElementType() const { return ContainedType; }
/// This static method is the primary way to construct an ArrayType
static ArrayType *get(Type *ElementType, uint64_t NumElements);
@ -412,7 +387,7 @@ uint64_t Type::getArrayNumElements() const {
}
/// Class to represent vector types.
class VectorType : public SequentialType {
class VectorType : public Type {
/// A fully specified VectorType is of the form <vscale x n x Ty>. 'n' is the
/// minimum number of elements of type Ty contained within the vector, and
/// 'vscale x' indicates that the total element count is an integer multiple
@ -426,18 +401,28 @@ class VectorType : public SequentialType {
/// <vscale x 4 x i32> - a vector containing an unknown integer multiple
/// of 4 i32s
/// The element type of the vector.
Type *ContainedType;
/// Minimum number of elements in the vector.
uint64_t NumElements;
VectorType(Type *ElType, unsigned NumEl, bool Scalable = false);
VectorType(Type *ElType, ElementCount EC);
// If true, the total number of elements is an unknown multiple of the
// minimum 'NumElements' from SequentialType. Otherwise the total number
// of elements is exactly equal to 'NumElements'.
// minimum 'NumElements'. Otherwise the total number of elements is exactly
// equal to 'NumElements'.
bool Scalable;
public:
VectorType(const VectorType &) = delete;
VectorType &operator=(const VectorType &) = delete;
/// For scalable vectors, this will return the minimum number of elements
/// in the vector.
uint64_t getNumElements() const { return NumElements; }
Type *getElementType() const { return ContainedType; }
/// This static method is the primary way to construct an VectorType.
static VectorType *get(Type *ElementType, ElementCount EC);
static VectorType *get(Type *ElementType, unsigned NumElements,

View File

@ -75,9 +75,15 @@ namespace llvm {
generic_gep_type_iterator& operator++() { // Preincrement
Type *Ty = getIndexedType();
if (auto *STy = dyn_cast<SequentialType>(Ty)) {
CurTy = STy->getElementType();
NumElements = STy->getNumElements();
if (auto *ATy = dyn_cast<ArrayType>(Ty)) {
CurTy = ATy->getElementType();
NumElements = ATy->getNumElements();
} else if (auto *VTy = dyn_cast<VectorType>(Ty)) {
CurTy = VTy->getElementType();
if (VTy->isScalable())
NumElements = Unbounded;
else
NumElements = VTy->getNumElements();
} else
CurTy = dyn_cast<StructType>(Ty);
++OpIt;

View File

@ -110,10 +110,6 @@ protected:
/// Float).
Type * const *ContainedTys = nullptr;
static bool isSequentialType(TypeID TyID) {
return TyID == ArrayTyID || TyID == VectorTyID;
}
public:
/// Print the current type.
/// Omit the type details if \p NoDetails == true.
@ -358,11 +354,6 @@ public:
inline unsigned getStructNumElements() const;
inline Type *getStructElementType(unsigned N) const;
inline Type *getSequentialElementType() const {
assert(isSequentialType(getTypeID()) && "Not a sequential type!");
return ContainedTys[0];
}
inline uint64_t getArrayNumElements() const;
Type *getArrayElementType() const {

View File

@ -1145,11 +1145,11 @@ static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
GEP1->getSourceElementType(), IntermediateIndices);
StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);
if (isa<SequentialType>(Ty)) {
if (isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
// We know that:
// - both GEPs begin indexing from the exact same pointer;
// - the last indices in both GEPs are constants, indexing into a sequential
// type (array or pointer);
// type (array or vector);
// - both GEPs only index through arrays prior to that.
//
// Because array indices greater than the number of elements are valid in
@ -1157,8 +1157,8 @@ static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
// GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
// partially overlap. We also need to check that the loaded size matches
// the element size, otherwise we could still have overlap.
const uint64_t ElementSize =
DL.getTypeStoreSize(cast<SequentialType>(Ty)->getElementType());
Type *LastElementTy = GetElementPtrInst::getTypeAtIndex(Ty, (uint64_t)0);
const uint64_t ElementSize = DL.getTypeStoreSize(LastElementTy);
if (V1Size != ElementSize || V2Size != ElementSize)
return MayAlias;

View File

@ -463,15 +463,18 @@ bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
isa<ConstantDataSequential>(C)) {
Type *EltTy = C->getType()->getSequentialElementType();
uint64_t NumElts;
Type *EltTy;
if (auto *AT = dyn_cast<ArrayType>(C->getType())) {
NumElts = AT->getNumElements();
EltTy = AT->getElementType();
} else {
NumElts = C->getType()->getVectorNumElements();
EltTy = C->getType()->getVectorElementType();
}
uint64_t EltSize = DL.getTypeAllocSize(EltTy);
uint64_t Index = ByteOffset / EltSize;
uint64_t Offset = ByteOffset - Index * EltSize;
uint64_t NumElts;
if (auto *AT = dyn_cast<ArrayType>(C->getType()))
NumElts = AT->getNumElements();
else
NumElts = C->getType()->getVectorNumElements();
for (; Index != NumElts; ++Index) {
if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
@ -936,11 +939,11 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
// Only handle pointers to sized types, not pointers to functions.
if (!Ty->isSized())
return nullptr;
} else if (auto *ATy = dyn_cast<SequentialType>(Ty)) {
Ty = ATy->getElementType();
} else {
// We've reached some non-indexable type.
break;
Type *NextTy = GetElementPtrInst::getTypeAtIndex(Ty, (uint64_t)0);
if (!NextTy)
break;
Ty = NextTy;
}
// Determine which element of the array the offset points into.

View File

@ -3555,7 +3555,7 @@ ScalarEvolution::getGEPExpr(GEPOperator *GEP,
CurTy = GEP->getSourceElementType();
FirstIter = false;
} else {
CurTy = cast<SequentialType>(CurTy)->getElementType();
CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0);
}
// For an array, add the element offset, explicitly scaled.
const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy);

View File

@ -2505,7 +2505,11 @@ Error BitcodeReader::parseConstants() {
if (Record.empty())
return error("Invalid record");
Type *EltTy = cast<SequentialType>(CurTy)->getElementType();
Type *EltTy;
if (auto *Array = dyn_cast<ArrayType>(CurTy))
EltTy = Array->getElementType();
else
EltTy = cast<VectorType>(CurTy)->getElementType();
if (EltTy->isIntegerTy(8)) {
SmallVector<uint8_t, 16> Elts(Record.begin(), Record.end());
if (isa<VectorType>(CurTy))

View File

@ -2423,7 +2423,7 @@ void ModuleBitcodeWriter::writeConstants(unsigned FirstVal, unsigned LastVal,
} else if (const ConstantDataSequential *CDS =
dyn_cast<ConstantDataSequential>(C)) {
Code = bitc::CST_CODE_DATA;
Type *EltTy = CDS->getType()->getElementType();
Type *EltTy = CDS->getElementType();
if (isa<IntegerType>(EltTy)) {
for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i)
Record.push_back(CDS->getElementAsInteger(i));

View File

@ -2473,8 +2473,8 @@ static void emitGlobalConstantDataSequential(const DataLayout &DL,
}
unsigned Size = DL.getTypeAllocSize(CDS->getType());
unsigned EmittedSize = DL.getTypeAllocSize(CDS->getType()->getElementType()) *
CDS->getNumElements();
unsigned EmittedSize =
DL.getTypeAllocSize(CDS->getElementType()) * CDS->getNumElements();
assert(EmittedSize <= Size && "Size cannot be less than EmittedSize!");
if (unsigned Padding = Size - EmittedSize)
AP.OutStreamer->emitZeros(Padding);

View File

@ -124,18 +124,9 @@ static Constant *FoldBitCast(Constant *V, Type *DestTy) {
Constant::getNullValue(Type::getInt32Ty(DPTy->getContext()));
IdxList.push_back(Zero);
Type *ElTy = PTy->getElementType();
while (ElTy != DPTy->getElementType()) {
if (StructType *STy = dyn_cast<StructType>(ElTy)) {
if (STy->getNumElements() == 0) break;
ElTy = STy->getElementType(0);
IdxList.push_back(Zero);
} else if (SequentialType *STy =
dyn_cast<SequentialType>(ElTy)) {
ElTy = STy->getElementType();
IdxList.push_back(Zero);
} else {
break;
}
while (ElTy && ElTy != DPTy->getElementType()) {
ElTy = GetElementPtrInst::getTypeAtIndex(ElTy, (uint64_t)0);
IdxList.push_back(Zero);
}
if (ElTy == DPTy->getElementType())
@ -954,7 +945,7 @@ Constant *llvm::ConstantFoldInsertValueInstruction(Constant *Agg,
if (StructType *ST = dyn_cast<StructType>(Agg->getType()))
NumElts = ST->getNumElements();
else
NumElts = cast<SequentialType>(Agg->getType())->getNumElements();
NumElts = cast<ArrayType>(Agg->getType())->getNumElements();
SmallVector<Constant*, 32> Result;
for (unsigned i = 0; i != NumElts; ++i) {
@ -969,9 +960,7 @@ Constant *llvm::ConstantFoldInsertValueInstruction(Constant *Agg,
if (StructType *ST = dyn_cast<StructType>(Agg->getType()))
return ConstantStruct::get(ST, Result);
if (ArrayType *AT = dyn_cast<ArrayType>(Agg->getType()))
return ConstantArray::get(AT, Result);
return ConstantVector::get(Result);
return ConstantArray::get(cast<ArrayType>(Agg->getType()), Result);
}
Constant *llvm::ConstantFoldUnaryInstruction(unsigned Opcode, Constant *C) {
@ -2451,12 +2440,12 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
// The verify makes sure that GEPs into a struct are in range.
continue;
}
auto *STy = cast<SequentialType>(Ty);
if (isa<VectorType>(STy)) {
if (isa<VectorType>(Ty)) {
// There can be awkward padding in after a non-power of two vector.
Unknown = true;
continue;
}
auto *STy = cast<ArrayType>(Ty);
if (ConstantInt *CI = dyn_cast<ConstantInt>(Idxs[i])) {
if (isIndexInRangeOfArrayType(STy->getNumElements(), CI))
// It's in range, skip to the next index.

View File

@ -923,7 +923,9 @@ void ConstantFP::destroyConstantImpl() {
//===----------------------------------------------------------------------===//
Constant *ConstantAggregateZero::getSequentialElement() const {
return Constant::getNullValue(getType()->getSequentialElementType());
if (auto *AT = dyn_cast<ArrayType>(getType()))
return Constant::getNullValue(AT->getElementType());
return Constant::getNullValue(cast<VectorType>(getType())->getElementType());
}
Constant *ConstantAggregateZero::getStructElement(unsigned Elt) const {
@ -931,13 +933,13 @@ Constant *ConstantAggregateZero::getStructElement(unsigned Elt) const {
}
Constant *ConstantAggregateZero::getElementValue(Constant *C) const {
if (isa<SequentialType>(getType()))
if (isa<ArrayType>(getType()) || isa<VectorType>(getType()))
return getSequentialElement();
return getStructElement(cast<ConstantInt>(C)->getZExtValue());
}
Constant *ConstantAggregateZero::getElementValue(unsigned Idx) const {
if (isa<SequentialType>(getType()))
if (isa<ArrayType>(getType()) || isa<VectorType>(getType()))
return getSequentialElement();
return getStructElement(Idx);
}
@ -956,7 +958,9 @@ unsigned ConstantAggregateZero::getNumElements() const {
//===----------------------------------------------------------------------===//
UndefValue *UndefValue::getSequentialElement() const {
return UndefValue::get(getType()->getSequentialElementType());
if (ArrayType *ATy = dyn_cast<ArrayType>(getType()))
return UndefValue::get(ATy->getElementType());
return UndefValue::get(cast<VectorType>(getType())->getElementType());
}
UndefValue *UndefValue::getStructElement(unsigned Elt) const {
@ -964,21 +968,23 @@ UndefValue *UndefValue::getStructElement(unsigned Elt) const {
}
UndefValue *UndefValue::getElementValue(Constant *C) const {
if (isa<SequentialType>(getType()))
if (isa<ArrayType>(getType()) || isa<VectorType>(getType()))
return getSequentialElement();
return getStructElement(cast<ConstantInt>(C)->getZExtValue());
}
UndefValue *UndefValue::getElementValue(unsigned Idx) const {
if (isa<SequentialType>(getType()))
if (isa<ArrayType>(getType()) || isa<VectorType>(getType()))
return getSequentialElement();
return getStructElement(Idx);
}
unsigned UndefValue::getNumElements() const {
Type *Ty = getType();
if (auto *ST = dyn_cast<SequentialType>(Ty))
return ST->getNumElements();
if (auto *AT = dyn_cast<ArrayType>(Ty))
return AT->getNumElements();
if (auto *VT = dyn_cast<VectorType>(Ty))
return VT->getNumElements();
return Ty->getStructNumElements();
}
@ -2536,7 +2542,9 @@ Type *GetElementPtrConstantExpr::getResultElementType() const {
// ConstantData* implementations
Type *ConstantDataSequential::getElementType() const {
return getType()->getElementType();
if (ArrayType *ATy = dyn_cast<ArrayType>(getType()))
return ATy->getElementType();
return cast<VectorType>(getType())->getElementType();
}
StringRef ConstantDataSequential::getRawDataValues() const {
@ -2589,7 +2597,12 @@ static bool isAllZeros(StringRef Arr) {
/// the correct element type. We take the bytes in as a StringRef because
/// we *want* an underlying "char*" to avoid TBAA type punning violations.
Constant *ConstantDataSequential::getImpl(StringRef Elements, Type *Ty) {
assert(isElementTypeCompatible(Ty->getSequentialElementType()));
#ifndef NDEBUG
if (ArrayType *ATy = dyn_cast<ArrayType>(Ty))
assert(isElementTypeCompatible(ATy->getElementType()));
else
assert(isElementTypeCompatible(cast<VectorType>(Ty)->getElementType()));
#endif
// If the elements are all zero or there are no elements, return a CAZ, which
// is more dense and canonical.
if (isAllZeros(Elements))

View File

@ -753,7 +753,9 @@ LLVMTypeRef LLVMGetElementType(LLVMTypeRef WrappedTy) {
auto *Ty = unwrap<Type>(WrappedTy);
if (auto *PTy = dyn_cast<PointerType>(Ty))
return wrap(PTy->getElementType());
return wrap(cast<SequentialType>(Ty)->getElementType());
if (auto *ATy = dyn_cast<ArrayType>(Ty))
return wrap(ATy->getElementType());
return wrap(cast<VectorType>(Ty)->getElementType());
}
unsigned LLVMGetNumContainedTypes(LLVMTypeRef Tp) {

View File

@ -553,7 +553,11 @@ bool StructType::indexValid(const Value *V) const {
//===----------------------------------------------------------------------===//
ArrayType::ArrayType(Type *ElType, uint64_t NumEl)
: SequentialType(ArrayTyID, ElType, NumEl) {}
: Type(ElType->getContext(), ArrayTyID), ContainedType(ElType),
NumElements(NumEl) {
ContainedTys = &ContainedType;
NumContainedTys = 1;
}
ArrayType *ArrayType::get(Type *ElementType, uint64_t NumElements) {
assert(isValidElementType(ElementType) && "Invalid type for array element!");
@ -580,7 +584,11 @@ bool ArrayType::isValidElementType(Type *ElemTy) {
//===----------------------------------------------------------------------===//
VectorType::VectorType(Type *ElType, ElementCount EC)
: SequentialType(VectorTyID, ElType, EC.Min), Scalable(EC.Scalable) {}
: Type(ElType->getContext(), VectorTyID), ContainedType(ElType),
NumElements(EC.Min), Scalable(EC.Scalable) {
ContainedTys = &ContainedType;
NumContainedTys = 1;
}
VectorType *VectorType::get(Type *ElementType, ElementCount EC) {
assert(EC.Min > 0 && "#Elements of a VectorType must be greater than 0");

View File

@ -173,9 +173,11 @@ bool TypeMapTy::areTypesIsomorphic(Type *DstTy, Type *SrcTy) {
if (DSTy->isLiteral() != SSTy->isLiteral() ||
DSTy->isPacked() != SSTy->isPacked())
return false;
} else if (auto *DSeqTy = dyn_cast<SequentialType>(DstTy)) {
if (DSeqTy->getNumElements() !=
cast<SequentialType>(SrcTy)->getNumElements())
} else if (auto *DArrTy = dyn_cast<ArrayType>(DstTy)) {
if (DArrTy->getNumElements() != cast<ArrayType>(SrcTy)->getNumElements())
return false;
} else if (auto *DVecTy = dyn_cast<VectorType>(DstTy)) {
if (DVecTy->getElementCount() != cast<VectorType>(SrcTy)->getElementCount())
return false;
}

View File

@ -364,8 +364,13 @@ static bool tryPromoteAllocaToVector(AllocaInst *Alloca) {
return false;
}
Type *AT = Alloca->getAllocatedType();
SequentialType *AllocaTy = dyn_cast<SequentialType>(AT);
Type *AllocaTy = Alloca->getAllocatedType();
VectorType *VectorTy = dyn_cast<VectorType>(AllocaTy);
if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
ArrayTy->getNumElements() > 0)
VectorTy = arrayTypeToVecType(ArrayTy);
}
LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");
@ -373,10 +378,8 @@ static bool tryPromoteAllocaToVector(AllocaInst *Alloca) {
// are just being conservative for now.
// FIXME: We also reject alloca's of the form [ 2 x [ 2 x i32 ]] or equivalent. Potentially these
// could also be promoted but we don't currently handle this case
if (!AllocaTy ||
AllocaTy->getNumElements() > 16 ||
AllocaTy->getNumElements() < 2 ||
!VectorType::isValidElementType(AllocaTy->getElementType())) {
if (!VectorTy || VectorTy->getNumElements() > 16 ||
VectorTy->getNumElements() < 2) {
LLVM_DEBUG(dbgs() << " Cannot convert type to vector\n");
return false;
}
@ -412,10 +415,6 @@ static bool tryPromoteAllocaToVector(AllocaInst *Alloca) {
}
}
VectorType *VectorTy = dyn_cast<VectorType>(AllocaTy);
if (!VectorTy)
VectorTy = arrayTypeToVecType(cast<ArrayType>(AllocaTy));
LLVM_DEBUG(dbgs() << " Converting alloca to vector " << *AllocaTy << " -> "
<< *VectorTy << '\n');
@ -424,7 +423,7 @@ static bool tryPromoteAllocaToVector(AllocaInst *Alloca) {
IRBuilder<> Builder(Inst);
switch (Inst->getOpcode()) {
case Instruction::Load: {
if (Inst->getType() == AT)
if (Inst->getType() == AllocaTy)
break;
Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
@ -440,7 +439,7 @@ static bool tryPromoteAllocaToVector(AllocaInst *Alloca) {
}
case Instruction::Store: {
StoreInst *SI = cast<StoreInst>(Inst);
if (SI->getValueOperand()->getType() == AT)
if (SI->getValueOperand()->getType() == AllocaTy)
break;
Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);

View File

@ -204,17 +204,7 @@ namespace {
Type *next_type(Type *Ty, Value *Idx) {
if (auto *PTy = dyn_cast<PointerType>(Ty))
return PTy->getElementType();
// Advance the type.
if (!Ty->isStructTy()) {
Type *NexTy = cast<SequentialType>(Ty)->getElementType();
return NexTy;
}
// Otherwise it is a struct type.
ConstantInt *CI = dyn_cast<ConstantInt>(Idx);
assert(CI && "Struct type with non-constant index");
int64_t i = CI->getValue().getSExtValue();
Type *NextTy = cast<StructType>(Ty)->getElementType(i);
return NextTy;
return GetElementPtrInst::getTypeAtIndex(Ty, Idx);
}
raw_ostream &operator<< (raw_ostream &OS, const GepNode &GN) {

View File

@ -784,13 +784,18 @@ bool ArgumentPromotionPass::isDenselyPacked(Type *type, const DataLayout &DL) {
if (DL.getTypeSizeInBits(type) != DL.getTypeAllocSizeInBits(type))
return false;
if (!isa<StructType>(type) && !isa<SequentialType>(type))
return true;
// For homogenous sequential types, check for padding within members.
if (SequentialType *seqTy = dyn_cast<SequentialType>(type))
// FIXME: This isn't the right way to check for padding in vectors with
// non-byte-size elements.
if (VectorType *seqTy = dyn_cast<VectorType>(type))
return isDenselyPacked(seqTy->getElementType(), DL);
// For array types, check for padding within members.
if (ArrayType *seqTy = dyn_cast<ArrayType>(type))
return isDenselyPacked(seqTy->getElementType(), DL);
if (!isa<StructType>(type))
return true;
// Check for padding within and between elements of a struct.
StructType *StructTy = cast<StructType>(type);
const StructLayout *Layout = DL.getStructLayout(StructTy);

View File

@ -128,13 +128,15 @@ static bool isLeakCheckerRoot(GlobalVariable *GV) {
Type *Ty = Types.pop_back_val();
switch (Ty->getTypeID()) {
default: break;
case Type::PointerTyID: return true;
case Type::ArrayTyID:
case Type::VectorTyID: {
SequentialType *STy = cast<SequentialType>(Ty);
Types.push_back(STy->getElementType());
case Type::PointerTyID:
return true;
case Type::VectorTyID:
if (cast<VectorType>(Ty)->getElementType()->isPointerTy())
return true;
break;
case Type::ArrayTyID:
Types.push_back(cast<ArrayType>(Ty)->getElementType());
break;
}
case Type::StructTyID: {
StructType *STy = cast<StructType>(Ty);
if (STy->isOpaque()) return true;
@ -142,7 +144,8 @@ static bool isLeakCheckerRoot(GlobalVariable *GV) {
E = STy->element_end(); I != E; ++I) {
Type *InnerTy = *I;
if (isa<PointerType>(InnerTy)) return true;
if (isa<StructType>(InnerTy) || isa<SequentialType>(InnerTy))
if (isa<StructType>(InnerTy) || isa<ArrayType>(InnerTy) ||
isa<VectorType>(InnerTy))
Types.push_back(InnerTy);
}
break;
@ -433,13 +436,27 @@ static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
return true;
}
static bool IsSRASequential(Type *T) {
return isa<ArrayType>(T) || isa<VectorType>(T);
}
static uint64_t GetSRASequentialNumElements(Type *T) {
if (ArrayType *AT = dyn_cast<ArrayType>(T))
return AT->getNumElements();
return cast<VectorType>(T)->getNumElements();
}
static Type *GetSRASequentialElementType(Type *T) {
if (ArrayType *AT = dyn_cast<ArrayType>(T))
return AT->getElementType();
return cast<VectorType>(T)->getElementType();
}
static bool CanDoGlobalSRA(GlobalVariable *GV) {
Constant *Init = GV->getInitializer();
if (isa<StructType>(Init->getType())) {
// nothing to check
} else if (SequentialType *STy = dyn_cast<SequentialType>(Init->getType())) {
if (STy->getNumElements() > 16 && GV->hasNUsesOrMore(16))
} else if (IsSRASequential(Init->getType())) {
if (GetSRASequentialNumElements(Init->getType()) > 16 &&
GV->hasNUsesOrMore(16))
return false; // It's not worth it.
} else
return false;
@ -509,8 +526,8 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
Type *ElTy = nullptr;
if (StructType *STy = dyn_cast<StructType>(Ty))
ElTy = STy->getElementType(ElementIdx);
else if (SequentialType *STy = dyn_cast<SequentialType>(Ty))
ElTy = STy->getElementType();
else
ElTy = GetSRASequentialElementType(Ty);
assert(ElTy);
Constant *In = Init->getAggregateElement(ElementIdx);
@ -541,7 +558,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
uint64_t FragmentOffsetInBits = Layout.getElementOffsetInBits(ElementIdx);
transferSRADebugInfo(GV, NGV, FragmentOffsetInBits, Size,
STy->getNumElements());
} else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
} else {
uint64_t EltSize = DL.getTypeAllocSize(ElTy);
Align EltAlign(DL.getABITypeAlignment(ElTy));
uint64_t FragmentSizeInBits = DL.getTypeAllocSizeInBits(ElTy);
@ -553,7 +570,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
if (NewAlign > EltAlign)
NGV->setAlignment(NewAlign);
transferSRADebugInfo(GV, NGV, FragmentSizeInBits * ElementIdx,
FragmentSizeInBits, STy->getNumElements());
FragmentSizeInBits, GetSRASequentialNumElements(Ty));
}
}
@ -2424,8 +2441,11 @@ static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
}
ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo));
SequentialType *InitTy = cast<SequentialType>(Init->getType());
uint64_t NumElts = InitTy->getNumElements();
uint64_t NumElts;
if (ArrayType *ATy = dyn_cast<ArrayType>(Init->getType()))
NumElts = ATy->getNumElements();
else
NumElts = cast<VectorType>(Init->getType())->getNumElements();
// Break up the array into elements.
for (uint64_t i = 0, e = NumElts; i != e; ++i)
@ -2436,7 +2456,7 @@ static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1);
if (Init->getType()->isArrayTy())
return ConstantArray::get(cast<ArrayType>(InitTy), Elts);
return ConstantArray::get(cast<ArrayType>(Init->getType()), Elts);
return ConstantVector::get(Elts);
}
@ -2558,8 +2578,10 @@ static void BatchCommitValueTo(const DenseMap<Constant*, Constant*> &Mem) {
unsigned NumElts;
if (auto *STy = dyn_cast<StructType>(Ty))
NumElts = STy->getNumElements();
else if (auto *ATy = dyn_cast<ArrayType>(Ty))
NumElts = ATy->getNumElements();
else
NumElts = cast<SequentialType>(Ty)->getNumElements();
NumElts = cast<VectorType>(Ty)->getNumElements();
for (unsigned i = 0, e = NumElts; i != e; ++i)
Elts.push_back(Init->getAggregateElement(i));
}

View File

@ -2132,7 +2132,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Type *Ty = ConstArg->getType();
if (Ty->isVectorTy()) {
unsigned NumElements = Ty->getVectorNumElements();
Type *EltTy = Ty->getSequentialElementType();
Type *EltTy = Ty->getVectorElementType();
SmallVector<Constant *, 16> Elements;
for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
if (ConstantInt *Elt =

View File

@ -3532,11 +3532,22 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset,
(DL.getTypeAllocSize(Ty).getFixedSize() - Offset) < Size)
return nullptr;
if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) {
Type *ElementTy = SeqTy->getElementType();
if (isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
Type *ElementTy;
uint64_t TyNumElements;
if (auto *AT = dyn_cast<ArrayType>(Ty)) {
ElementTy = AT->getElementType();
TyNumElements = AT->getNumElements();
} else {
// FIXME: This isn't right for vectors with non-byte-sized or
// non-power-of-two sized elements.
auto *VT = cast<VectorType>(Ty);
ElementTy = VT->getElementType();
TyNumElements = VT->getNumElements();
}
uint64_t ElementSize = DL.getTypeAllocSize(ElementTy).getFixedSize();
uint64_t NumSkippedElements = Offset / ElementSize;
if (NumSkippedElements >= SeqTy->getNumElements())
if (NumSkippedElements >= TyNumElements)
return nullptr;
Offset -= NumSkippedElements * ElementSize;

View File

@ -476,14 +476,24 @@ int FunctionComparator::cmpTypes(Type *TyL, Type *TyR) const {
return 0;
}
case Type::ArrayTyID:
case Type::VectorTyID: {
auto *STyL = cast<SequentialType>(TyL);
auto *STyR = cast<SequentialType>(TyR);
case Type::ArrayTyID: {
auto *STyL = cast<ArrayType>(TyL);
auto *STyR = cast<ArrayType>(TyR);
if (STyL->getNumElements() != STyR->getNumElements())
return cmpNumbers(STyL->getNumElements(), STyR->getNumElements());
return cmpTypes(STyL->getElementType(), STyR->getElementType());
}
case Type::VectorTyID: {
auto *STyL = cast<VectorType>(TyL);
auto *STyR = cast<VectorType>(TyR);
if (STyL->getElementCount().Scalable != STyR->getElementCount().Scalable)
return cmpNumbers(STyL->getElementCount().Scalable,
STyR->getElementCount().Scalable);
if (STyL->getElementCount().Min != STyR->getElementCount().Min)
return cmpNumbers(STyL->getElementCount().Min,
STyR->getElementCount().Min);
return cmpTypes(STyL->getElementType(), STyR->getElementType());
}
}
}

View File

@ -3131,7 +3131,8 @@ unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
unsigned N = 1;
Type *EltTy = T;
while (isa<StructType>(EltTy) || isa<SequentialType>(EltTy)) {
while (isa<StructType>(EltTy) || isa<ArrayType>(EltTy) ||
isa<VectorType>(EltTy)) {
if (auto *ST = dyn_cast<StructType>(EltTy)) {
// Check that struct is homogeneous.
for (const auto *Ty : ST->elements())
@ -3139,10 +3140,13 @@ unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
return 0;
N *= ST->getNumElements();
EltTy = *ST->element_begin();
} else if (auto *AT = dyn_cast<ArrayType>(EltTy)) {
N *= AT->getNumElements();
EltTy = AT->getElementType();
} else {
auto *SeqT = cast<SequentialType>(EltTy);
N *= SeqT->getNumElements();
EltTy = SeqT->getElementType();
auto *VT = cast<VectorType>(EltTy);
N *= VT->getNumElements();
EltTy = VT->getElementType();
}
}

View File

@ -51,12 +51,16 @@ buildSequentialConstant(ArrayRef<llvm::Constant *> &constants,
return result;
}
if (!isa<llvm::SequentialType>(type)) {
llvm::Type *elementType;
if (auto *arrayTy = dyn_cast<llvm::ArrayType>(type)) {
elementType = arrayTy->getElementType();
} else if (auto *vectorTy = dyn_cast<llvm::VectorType>(type)) {
elementType = vectorTy->getElementType();
} else {
emitError(loc) << "expected sequential LLVM types wrapping a scalar";
return nullptr;
}
llvm::Type *elementType = type->getSequentialElementType();
SmallVector<llvm::Constant *, 8> nested;
nested.reserve(shape.front());
for (int64_t i = 0; i < shape.front(); ++i) {
@ -74,9 +78,15 @@ buildSequentialConstant(ArrayRef<llvm::Constant *> &constants,
/// Returns the first non-sequential type nested in sequential types.
static llvm::Type *getInnermostElementType(llvm::Type *type) {
while (isa<llvm::SequentialType>(type))
type = type->getSequentialElementType();
return type;
do {
if (auto *arrayTy = dyn_cast<llvm::ArrayType>(type)) {
type = arrayTy->getElementType();
} else if (auto *vectorTy = dyn_cast<llvm::VectorType>(type)) {
type = vectorTy->getElementType();
} else {
return type;
}
} while (1);
}
/// Create an LLVM IR constant of `llvmType` from the MLIR attribute `attr`.
@ -106,17 +116,24 @@ llvm::Constant *ModuleTranslation::getLLVMConstant(llvm::Type *llvmType,
return llvm::ConstantExpr::getBitCast(
functionMapping.lookup(funcAttr.getValue()), llvmType);
if (auto splatAttr = attr.dyn_cast<SplatElementsAttr>()) {
auto *sequentialType = cast<llvm::SequentialType>(llvmType);
auto *elementType = sequentialType->getElementType();
uint64_t numElements = sequentialType->getNumElements();
llvm::Type *elementType;
uint64_t numElements;
if (auto *arrayTy = dyn_cast<llvm::ArrayType>(llvmType)) {
elementType = arrayTy->getElementType();
numElements = arrayTy->getNumElements();
} else {
auto *vectorTy = cast<llvm::VectorType>(llvmType);
elementType = vectorTy->getElementType();
numElements = vectorTy->getNumElements();
}
// Splat value is a scalar. Extract it only if the element type is not
// another sequence type. The recursion terminates because each step removes
// one outer sequential type.
bool elementTypeSequential =
isa<llvm::ArrayType>(elementType) || isa<llvm::VectorType>(elementType);
llvm::Constant *child = getLLVMConstant(
elementType,
isa<llvm::SequentialType>(elementType) ? splatAttr
: splatAttr.getSplatValue(),
loc);
elementTypeSequential ? splatAttr : splatAttr.getSplatValue(), loc);
if (!child)
return nullptr;
if (llvmType->isVectorTy())