forked from OSchip/llvm-project

commit 6859f33292 (parent: b64998a6b3)

Migrate VectorOrTensorType/MemRefType shape api to use int64_t instead of int.

PiperOrigin-RevId: 230605756
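The change is mechanical, but the motivation is worth stating: MLIR's index type can be up to 64 bits wide, shape extents must be able to hold any index value, and the sentinel -1 marks a dynamic dimension, so the element type has to be a signed 64-bit integer. A standalone illustration in plain C++ (not MLIR code; everything here is invented for the sketch):

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

int main() {
  // A dimension of 2^33 elements fits in int64_t but overflows a 32-bit int.
  // -1 is the sentinel for an unknown (dynamic) extent, so the type must
  // stay signed; uint64_t would make the sentinel test awkward.
  std::vector<int64_t> shape = {int64_t(1) << 33, /*dynamic=*/-1};
  for (int64_t d : shape)
    std::cout << (d < 0 ? std::string("dynamic") : std::to_string(d)) << "\n";
  return 0;
}

Every hunk below is the same substitution: int (or long) becomes int64_t wherever a shape extent, rank, or dimension size is stored.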
@@ -81,7 +81,7 @@ struct MemRefRegion {
   /// bounds major to minor. We use int64_t instead of uint64_t since index
   /// types can be at most int64_t.
   Optional<int64_t> getConstantBoundingSizeAndShape(
-      SmallVectorImpl<int> *shape = nullptr,
+      SmallVectorImpl<int64_t> *shape = nullptr,
       std::vector<SmallVector<int64_t, 4>> *lbs = nullptr,
       SmallVectorImpl<int64_t> *lbDivisors = nullptr) const;
@@ -48,7 +48,7 @@ class VectorType;
 /// - shapeRatio({3, 4, 4, 8}, {2, 5, 2}) returns None
 /// - shapeRatio({1, 2, 10, 32}, {2, 5, 2}) returns {1, 1, 2, 16}
 llvm::Optional<llvm::SmallVector<unsigned, 4>>
-shapeRatio(ArrayRef<int> superShape, ArrayRef<int> subShape);
+shapeRatio(ArrayRef<int64_t> superShape, ArrayRef<int64_t> subShape);
 
 /// Computes and returns the multi-dimensional ratio of the shapes of
 /// `superVector` to `subVector`. If integral division is not possible, returns
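For reference, a standalone sketch of the contract the two doc examples above imply, written against std containers rather than the LLVM ADTs (illustration only; the real function returns SmallVector<unsigned, 4>): trailing dimensions of superShape must divide evenly by subShape, and leading dimensions pass through.

#include <cstdint>
#include <optional>
#include <vector>

std::optional<std::vector<int64_t>>
shapeRatioSketch(const std::vector<int64_t> &superShape,
                 const std::vector<int64_t> &subShape) {
  if (superShape.size() < subShape.size())
    return std::nullopt;
  std::vector<int64_t> result(superShape.begin(), superShape.end());
  // Divide from the minor (rightmost) dimension inward.
  for (size_t i = 0; i < subShape.size(); ++i) {
    int64_t &superDim = result[result.size() - 1 - i];
    int64_t subDim = subShape[subShape.size() - 1 - i];
    if (superDim % subDim != 0)
      return std::nullopt; // not integrally divisible
    superDim /= subDim;
  }
  return result;
}
// shapeRatioSketch({1, 2, 10, 32}, {2, 5, 2}) yields {1, 1, 2, 16};
// shapeRatioSketch({3, 4, 4, 8}, {2, 5, 2}) yields nullopt (5 does not divide 4).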
@@ -80,11 +80,11 @@ public:
   IntegerType getI1Type();
   IntegerType getIntegerType(unsigned width);
   FunctionType getFunctionType(ArrayRef<Type> inputs, ArrayRef<Type> results);
-  MemRefType getMemRefType(ArrayRef<int> shape, Type elementType,
+  MemRefType getMemRefType(ArrayRef<int64_t> shape, Type elementType,
                            ArrayRef<AffineMap> affineMapComposition = {},
                            unsigned memorySpace = 0);
-  VectorType getVectorType(ArrayRef<int> shape, Type elementType);
-  RankedTensorType getTensorType(ArrayRef<int> shape, Type elementType);
+  VectorType getVectorType(ArrayRef<int64_t> shape, Type elementType);
+  RankedTensorType getTensorType(ArrayRef<int64_t> shape, Type elementType);
   UnrankedTensorType getTensorType(Type elementType);
 
   /// Get or construct an instance of the type 'ty' with provided arguments.
@@ -186,11 +186,11 @@ public:
 
   /// If this is ranked tensor or vector type, return the rank. If it is an
   /// unranked tensor, return -1.
-  int getRank() const;
+  int64_t getRank() const;
 
   /// If this is ranked tensor or vector type, return the shape. If it is an
   /// unranked tensor, abort.
-  ArrayRef<int> getShape() const;
+  ArrayRef<int64_t> getShape() const;
 
   /// If this is unranked tensor or any dimension has unknown size (<0),
   /// it doesn't have static shape. If all dimensions have known size (>= 0),
@@ -200,7 +200,7 @@ public:
   /// If this is ranked tensor or vector type, return the size of the specified
   /// dimension. It aborts if the tensor is unranked (this can be checked by
   /// the getRank call method).
-  int getDimSize(unsigned i) const;
+  int64_t getDimSize(unsigned i) const;
 
   /// Get the total amount of bits occupied by a value of this type. This does
   /// not take into account any memory layout or widening constraints, e.g. a
@@ -208,7 +208,7 @@ public:
   /// it will likely be stored as in a 4xi64 vector register. Fail an assertion
   /// if the size cannot be computed statically, i.e. if the tensor has a
   /// dynamic shape or if its elemental type does not have a known bit width.
-  long getSizeInBits() const;
+  int64_t getSizeInBits() const;
 
   /// Methods for support type inquiry through isa, cast, and dyn_cast.
   static bool kindof(unsigned kind) {
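A minimal sketch of the static bit-size rule this comment describes, with plain std types standing in for the MLIR ones (illustration, not part of the diff): the size is the product of all extents times the element bit width, and it is only defined when the shape is fully static.

#include <cassert>
#include <cstdint>
#include <vector>

int64_t sizeInBits(const std::vector<int64_t> &shape, int64_t elementBits) {
  int64_t numElements = 1;
  for (int64_t d : shape) {
    assert(d >= 0 && "bit size is undefined for dynamic shapes");
    numElements *= d; // a 64-bit accumulator is needed: products overflow int
  }
  return numElements * elementBits;
}
// sizeInBits({4, 8}, 32) == 1024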
@@ -227,26 +227,26 @@ public:
 
   /// Get or create a new VectorType of the provided shape and element type.
   /// Assumes the arguments define a well-formed VectorType.
-  static VectorType get(ArrayRef<int> shape, Type elementType);
+  static VectorType get(ArrayRef<int64_t> shape, Type elementType);
 
   /// Get or create a new VectorType of the provided shape and element type
   /// declared at the given, potentially unknown, location. If the VectorType
   /// defined by the arguments would be ill-formed, emit errors and return
   /// nullptr-wrapping type.
-  static VectorType getChecked(ArrayRef<int> shape, Type elementType,
+  static VectorType getChecked(ArrayRef<int64_t> shape, Type elementType,
                                Location location);
 
   /// Verify the construction of a vector type.
   static bool verifyConstructionInvariants(llvm::Optional<Location> loc,
                                            MLIRContext *context,
-                                           ArrayRef<int> shape,
+                                           ArrayRef<int64_t> shape,
                                            Type elementType);
 
   /// Returns true of the given type can be used as an element of a vector type.
   /// In particular, vectors can consist of integer or float primitives.
   static bool isValidElementType(Type t) { return t.isIntOrFloat(); }
 
-  ArrayRef<int> getShape() const;
+  ArrayRef<int64_t> getShape() const;
 
   /// Methods for support type inquiry through isa, cast, and dyn_cast.
   static bool kindof(unsigned kind) { return kind == StandardTypes::Vector; }
@@ -290,22 +290,22 @@ public:
 
   /// Get or create a new RankedTensorType of the provided shape and element
   /// type. Assumes the arguments define a well-formed type.
-  static RankedTensorType get(ArrayRef<int> shape, Type elementType);
+  static RankedTensorType get(ArrayRef<int64_t> shape, Type elementType);
 
   /// Get or create a new RankedTensorType of the provided shape and element
   /// type declared at the given, potentially unknown, location. If the
   /// RankedTensorType defined by the arguments would be ill-formed, emit errors
   /// and return a nullptr-wrapping type.
-  static RankedTensorType getChecked(ArrayRef<int> shape, Type elementType,
+  static RankedTensorType getChecked(ArrayRef<int64_t> shape, Type elementType,
                                      Location location);
 
   /// Verify the construction of a ranked tensor type.
   static bool verifyConstructionInvariants(llvm::Optional<Location> loc,
                                            MLIRContext *context,
-                                           ArrayRef<int> shape,
+                                           ArrayRef<int64_t> shape,
                                            Type elementType);
 
-  ArrayRef<int> getShape() const;
+  ArrayRef<int64_t> getShape() const;
 
   static bool kindof(unsigned kind) {
     return kind == StandardTypes::RankedTensor;
@@ -338,7 +338,7 @@ public:
                                            MLIRContext *context,
                                            Type elementType);
 
-  ArrayRef<int> getShape() const { return ArrayRef<int>(); }
+  ArrayRef<int64_t> getShape() const { return llvm::None; }
 
   static bool kindof(unsigned kind) {
     return kind == StandardTypes::UnrankedTensor;
@@ -361,7 +361,7 @@ public:
   /// map composition, and memory space. Assumes the arguments define a
   /// well-formed MemRef type. Use getChecked to gracefully handle MemRefType
   /// construction failures.
-  static MemRefType get(ArrayRef<int> shape, Type elementType,
+  static MemRefType get(ArrayRef<int64_t> shape, Type elementType,
                         ArrayRef<AffineMap> affineMapComposition,
                         unsigned memorySpace) {
     auto result = getImpl(shape, elementType, affineMapComposition, memorySpace,
@@ -376,7 +376,7 @@ public:
   /// UnknownLoc. If the MemRefType defined by the arguments would be
   /// ill-formed, emits errors (to the handler registered with the context or to
   /// the error stream) and returns nullptr.
-  static MemRefType getChecked(ArrayRef<int> shape, Type elementType,
+  static MemRefType getChecked(ArrayRef<int64_t> shape, Type elementType,
                                ArrayRef<AffineMap> affineMapComposition,
                                unsigned memorySpace, Location location) {
     return getImpl(shape, elementType, affineMapComposition, memorySpace,
@@ -386,10 +386,10 @@ public:
   unsigned getRank() const { return getShape().size(); }
 
   /// Returns an array of memref shape dimension sizes.
-  ArrayRef<int> getShape() const;
+  ArrayRef<int64_t> getShape() const;
 
   /// Return the size of the specified dimension, or -1 if unspecified.
-  int getDimSize(unsigned i) const { return getShape()[i]; }
+  int64_t getDimSize(unsigned i) const { return getShape()[i]; }
 
   /// Returns the elemental type for this memref shape.
   Type getElementType() const;
@@ -404,6 +404,10 @@ public:
   /// Returns the number of dimensions with dynamic size.
   unsigned getNumDynamicDims() const;
 
+  /// If any dimension of the shape has unknown size (<0), it doesn't have
+  /// static shape.
+  bool hasStaticShape() const { return getNumDynamicDims() == 0; }
+
   static bool kindof(unsigned kind) { return kind == StandardTypes::MemRef; }
 
   /// Unique identifier for this type class.
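The new member ties hasStaticShape to the dynamic-dimension count. A minimal standalone version of that relationship (plain C++, -1 as the dynamic sentinel; not MLIR code):

#include <algorithm>
#include <cstdint>
#include <vector>

int64_t numDynamicDims(const std::vector<int64_t> &shape) {
  // Every negative extent counts as one dynamic dimension.
  return std::count_if(shape.begin(), shape.end(),
                       [](int64_t d) { return d < 0; });
}
bool hasStaticShape(const std::vector<int64_t> &shape) {
  return numDynamicDims(shape) == 0;
}
// hasStaticShape({2, 4, 8}) == true; hasStaticShape({2, -1, 8}) == false.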
@@ -413,7 +417,7 @@ private:
   /// Get or create a new MemRefType defined by the arguments. If the resulting
   /// type would be ill-formed, return nullptr. If the location is provided,
   /// emit detailed error messages.
-  static MemRefType getImpl(ArrayRef<int> shape, Type elementType,
+  static MemRefType getImpl(ArrayRef<int64_t> shape, Type elementType,
                             ArrayRef<AffineMap> affineMapComposition,
                             unsigned memorySpace, Optional<Location> location);
 };
@@ -56,7 +56,7 @@ unsigned MemRefRegion::getRank() const {
 }
 
 Optional<int64_t> MemRefRegion::getConstantBoundingSizeAndShape(
-    SmallVectorImpl<int> *shape, std::vector<SmallVector<int64_t, 4>> *lbs,
+    SmallVectorImpl<int64_t> *shape, std::vector<SmallVector<int64_t, 4>> *lbs,
     SmallVectorImpl<int64_t> *lbDivisors) const {
   auto memRefType = memref->getType().cast<MemRefType>();
   unsigned rank = memRefType.getRank();
@@ -289,7 +289,7 @@ bool mlir::boundCheckLoadOrStoreOp(LoadOrStoreOpPointer loadOrStoreOp,
     // of upper and out of lower), and check if the constraint system is
     // feasible. If it is, there is at least one point out of bounds.
     SmallVector<int64_t, 4> ineq(rank + 1, 0);
-    int dimSize = loadOrStoreOp->getMemRefType().getDimSize(r);
+    int64_t dimSize = loadOrStoreOp->getMemRefType().getDimSize(r);
     // TODO(bondhugula): handle dynamic dim sizes.
     if (dimSize == -1)
       continue;
@@ -37,8 +37,8 @@ using namespace mlir;
 
 using llvm::SetVector;
 
-Optional<SmallVector<unsigned, 4>> mlir::shapeRatio(ArrayRef<int> superShape,
-                                                    ArrayRef<int> subShape) {
+Optional<SmallVector<unsigned, 4>>
+mlir::shapeRatio(ArrayRef<int64_t> superShape, ArrayRef<int64_t> subShape) {
   if (superShape.size() < subShape.size()) {
     return Optional<SmallVector<unsigned, 4>>();
   }
@@ -55,8 +55,8 @@ Optional<SmallVector<unsigned, 4>> mlir::shapeRatio(ArrayRef<int> superShape,
     result.push_back(superSize / subSize);
   };
   functional::zipApply(
-      divide, SmallVector<int, 8>{superShape.rbegin(), superShape.rend()},
-      SmallVector<int, 8>{subShape.rbegin(), subShape.rend()});
+      divide, SmallVector<int64_t, 8>{superShape.rbegin(), superShape.rend()},
+      SmallVector<int64_t, 8>{subShape.rbegin(), subShape.rend()});
 
   // If integral division does not occur, return and let the caller decide.
   if (!divides) {
@@ -86,7 +86,7 @@ Type OpTrait::util::getBroadcastedType(Type type1, Type type2) {
   }
 
   // Returns the shape of the given type.
-  auto getShape = [](Type type) -> ArrayRef<int> {
+  auto getShape = [](Type type) -> ArrayRef<int64_t> {
     if (auto vtType = type.dyn_cast<VectorOrTensorType>())
       return vtType.getShape();
     return {};
@@ -104,7 +104,7 @@ Type OpTrait::util::getBroadcastedType(Type type1, Type type2) {
   // The result shape has the maximum among the two inputs at every
   // dimension index.
 
-  SmallVector<int, 4> resultShape;
+  SmallVector<int64_t, 4> resultShape;
   if (shape1.size() > shape2.size()) {
     std::copy(shape1.begin(), shape1.end(), std::back_inserter(resultShape));
   } else {
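A reduced sketch of the rule the comment above states (result takes the maximum extent at every dimension index, with the shorter shape right-aligned), using std types and eliding the compatibility checks the real broadcast logic performs (broadcastShape is invented for this illustration):

#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

std::vector<int64_t> broadcastShape(std::vector<int64_t> a,
                                    std::vector<int64_t> b) {
  if (a.size() < b.size())
    std::swap(a, b); // make `a` the longer shape
  // Right-align b against a and take the maximum extent per dimension.
  size_t offset = a.size() - b.size();
  for (size_t i = 0; i < b.size(); ++i)
    a[offset + i] = std::max(a[offset + i], b[i]);
  return a;
}
// broadcastShape({4, 1, 8}, {3, 8}) == {4, 3, 8}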
@@ -168,7 +168,7 @@ Attribute DenseElementsAttr::getValue(ArrayRef<uint64_t> index) const {
   // Reduce the provided multidimensional index into a 1D index.
   uint64_t valueIndex = 0;
   uint64_t dimMultiplier = 1;
-  for (int i = rank - 1; i >= 0; --i) {
+  for (auto i = rank - 1; i >= 0; --i) {
     valueIndex += index[i] * dimMultiplier;
     dimMultiplier *= shape[i];
   }
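This hunk also shows why the induction variable changed: rank is now int64_t, and the countdown to zero needs a matching signed type, which `auto` picks up. A standalone version of the row-major reduction (plain C++, for illustration):

#include <cstdint>
#include <vector>

uint64_t linearIndex(const std::vector<uint64_t> &index,
                     const std::vector<int64_t> &shape) {
  uint64_t valueIndex = 0;
  uint64_t dimMultiplier = 1;
  // Walk dimensions from minor to major, keeping a running multiplier.
  for (int64_t i = int64_t(shape.size()) - 1; i >= 0; --i) {
    valueIndex += index[i] * dimMultiplier;
    dimMultiplier *= shape[i];
  }
  return valueIndex;
}
// For shape {2, 3}, the index {1, 2} maps to 1 * 3 + 2 = 5.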
@@ -346,7 +346,7 @@ Attribute SparseElementsAttr::getValue(ArrayRef<uint64_t> index) const {
 
   // Build a mapping between known indices and the offset of the stored element.
   llvm::SmallDenseMap<llvm::ArrayRef<uint64_t>, size_t> mappedIndices;
-  size_t numSparseIndices = sparseIndices.getType().getDimSize(0);
+  auto numSparseIndices = sparseIndices.getType().getDimSize(0);
   for (size_t i = 0, e = numSparseIndices; i != e; ++i)
     mappedIndices.try_emplace(
         {sparseIndexValues + (i * rank), static_cast<size_t>(rank)}, i);
@@ -77,17 +77,18 @@ FunctionType Builder::getFunctionType(ArrayRef<Type> inputs,
   return FunctionType::get(inputs, results, context);
 }
 
-MemRefType Builder::getMemRefType(ArrayRef<int> shape, Type elementType,
+MemRefType Builder::getMemRefType(ArrayRef<int64_t> shape, Type elementType,
                                   ArrayRef<AffineMap> affineMapComposition,
                                   unsigned memorySpace) {
   return MemRefType::get(shape, elementType, affineMapComposition, memorySpace);
 }
 
-VectorType Builder::getVectorType(ArrayRef<int> shape, Type elementType) {
+VectorType Builder::getVectorType(ArrayRef<int64_t> shape, Type elementType) {
   return VectorType::get(shape, elementType);
 }
 
-RankedTensorType Builder::getTensorType(ArrayRef<int> shape, Type elementType) {
+RankedTensorType Builder::getTensorType(ArrayRef<int64_t> shape,
+                                        Type elementType) {
   return RankedTensorType::get(shape, elementType);
 }
@@ -112,6 +112,7 @@ unsigned VectorOrTensorType::getNumElements() const {
   switch (getKind()) {
   case StandardTypes::Vector:
   case StandardTypes::RankedTensor: {
+    assert(hasStaticShape() && "expected type to have static shape");
     auto shape = getShape();
     unsigned num = 1;
     for (auto dim : shape)
@@ -125,7 +126,7 @@ unsigned VectorOrTensorType::getNumElements() const {
 
 /// If this is ranked tensor or vector type, return the rank. If it is an
 /// unranked tensor, return -1.
-int VectorOrTensorType::getRank() const {
+int64_t VectorOrTensorType::getRank() const {
   switch (getKind()) {
   case StandardTypes::Vector:
   case StandardTypes::RankedTensor:
@@ -137,7 +138,7 @@ int VectorOrTensorType::getRank() const {
   }
 }
 
-int VectorOrTensorType::getDimSize(unsigned i) const {
+int64_t VectorOrTensorType::getDimSize(unsigned i) const {
   switch (getKind()) {
   case StandardTypes::Vector:
   case StandardTypes::RankedTensor:
@@ -150,7 +151,7 @@ int VectorOrTensorType::getDimSize(unsigned i) const {
 // Get the number of number of bits require to store a value of the given vector
 // or tensor types. Compute the value recursively since tensors are allowed to
 // have vectors as elements.
-long VectorOrTensorType::getSizeInBits() const {
+int64_t VectorOrTensorType::getSizeInBits() const {
   assert(hasStaticShape() &&
          "cannot get the bit size of an aggregate with a dynamic shape");
@@ -165,7 +166,7 @@ long VectorOrTensorType::getSizeInBits() const {
   return getNumElements() * elementVectorOrTensorType.getSizeInBits();
 }
 
-ArrayRef<int> VectorOrTensorType::getShape() const {
+ArrayRef<int64_t> VectorOrTensorType::getShape() const {
   switch (getKind()) {
   case StandardTypes::Vector:
     return cast<VectorType>().getShape();
@@ -179,18 +180,17 @@ ArrayRef<int> VectorOrTensorType::getShape() const {
 bool VectorOrTensorType::hasStaticShape() const {
   if (isa<UnrankedTensorType>())
     return false;
-  auto dims = getShape();
-  return !std::any_of(dims.begin(), dims.end(), [](int i) { return i < 0; });
+  return llvm::none_of(getShape(), [](int64_t i) { return i < 0; });
 }
 
 /// VectorType
 
-VectorType VectorType::get(ArrayRef<int> shape, Type elementType) {
+VectorType VectorType::get(ArrayRef<int64_t> shape, Type elementType) {
   return Base::get(elementType.getContext(), StandardTypes::Vector, shape,
                    elementType);
 }
 
-VectorType VectorType::getChecked(ArrayRef<int> shape, Type elementType,
+VectorType VectorType::getChecked(ArrayRef<int64_t> shape, Type elementType,
                                   Location location) {
   return Base::getChecked(location, elementType.getContext(),
                           StandardTypes::Vector, shape, elementType);
@@ -198,7 +198,7 @@ VectorType VectorType::getChecked(ArrayRef<int> shape, Type elementType,
 
 bool VectorType::verifyConstructionInvariants(llvm::Optional<Location> loc,
                                               MLIRContext *context,
-                                              ArrayRef<int> shape,
+                                              ArrayRef<int64_t> shape,
                                               Type elementType) {
   if (shape.empty()) {
     if (loc)
@@ -212,7 +212,7 @@ bool VectorType::verifyConstructionInvariants(llvm::Optional<Location> loc,
     return true;
   }
 
-  if (any_of(shape, [](int i) { return i < 0; })) {
+  if (any_of(shape, [](int64_t i) { return i < 0; })) {
     if (loc)
       context->emitError(*loc, "vector types must have static shape");
     return true;
@@ -220,7 +220,7 @@ bool VectorType::verifyConstructionInvariants(llvm::Optional<Location> loc,
   return false;
 }
 
-ArrayRef<int> VectorType::getShape() const {
+ArrayRef<int64_t> VectorType::getShape() const {
   return static_cast<ImplType *>(type)->getShape();
 }
@@ -241,12 +241,13 @@ static inline bool checkTensorElementType(Optional<Location> location,
 
 /// RankedTensorType
 
-RankedTensorType RankedTensorType::get(ArrayRef<int> shape, Type elementType) {
+RankedTensorType RankedTensorType::get(ArrayRef<int64_t> shape,
+                                       Type elementType) {
   return Base::get(elementType.getContext(), StandardTypes::RankedTensor, shape,
                    elementType);
 }
 
-RankedTensorType RankedTensorType::getChecked(ArrayRef<int> shape,
+RankedTensorType RankedTensorType::getChecked(ArrayRef<int64_t> shape,
                                               Type elementType,
                                               Location location) {
   return Base::getChecked(location, elementType.getContext(),
@@ -254,16 +255,16 @@ RankedTensorType RankedTensorType::getChecked(ArrayRef<int> shape,
 }
 
 bool RankedTensorType::verifyConstructionInvariants(
-    llvm::Optional<Location> loc, MLIRContext *context, ArrayRef<int> shape,
+    llvm::Optional<Location> loc, MLIRContext *context, ArrayRef<int64_t> shape,
     Type elementType) {
   return checkTensorElementType(loc, context, elementType);
 }
 
-ArrayRef<int> RankedTensorType::getShape() const {
+ArrayRef<int64_t> RankedTensorType::getShape() const {
   return static_cast<ImplType *>(type)->getShape();
 }
 
-ArrayRef<int> MemRefType::getShape() const {
+ArrayRef<int64_t> MemRefType::getShape() const {
   return static_cast<ImplType *>(type)->getShape();
 }
 
@@ -291,7 +292,7 @@ bool UnrankedTensorType::verifyConstructionInvariants(
 /// type would be ill-formed, return nullptr. If the location is provided,
 /// emit detailed error messages. To emit errors when the location is unknown,
 /// pass in an instance of UnknownLoc.
-MemRefType MemRefType::getImpl(ArrayRef<int> shape, Type elementType,
+MemRefType MemRefType::getImpl(ArrayRef<int64_t> shape, Type elementType,
                                ArrayRef<AffineMap> affineMapComposition,
                                unsigned memorySpace,
                                Optional<Location> location) {
@@ -346,12 +347,7 @@ unsigned MemRefType::getMemorySpace() const {
 }
 
 unsigned MemRefType::getNumDynamicDims() const {
-  unsigned numDynamicDims = 0;
-  for (int dimSize : getShape()) {
-    if (dimSize == -1)
-      ++numDynamicDims;
-  }
-  return numDynamicDims;
+  return llvm::count_if(getShape(), [](int64_t i) { return i < 0; });
 }
 
 // Define type identifiers.
@@ -131,12 +131,12 @@ struct VectorOrTensorTypeStorage : public TypeStorage {
 /// Vector Type Storage and Uniquing.
 struct VectorTypeStorage : public VectorOrTensorTypeStorage {
   VectorTypeStorage(unsigned shapeSize, Type elementTy,
-                    const int *shapeElements)
+                    const int64_t *shapeElements)
       : VectorOrTensorTypeStorage(elementTy, shapeSize),
         shapeElements(shapeElements) {}
 
   /// The hash key used for uniquing.
-  using KeyTy = std::pair<ArrayRef<int>, Type>;
+  using KeyTy = std::pair<ArrayRef<int64_t>, Type>;
   bool operator==(const KeyTy &key) const {
     return key == KeyTy(getShape(), elementType);
   }
@@ -145,28 +145,28 @@ struct VectorTypeStorage : public VectorOrTensorTypeStorage {
   static VectorTypeStorage *construct(TypeStorageAllocator &allocator,
                                       const KeyTy &key) {
     // Copy the shape into the bump pointer.
-    ArrayRef<int> shape = allocator.copyInto(key.first);
+    ArrayRef<int64_t> shape = allocator.copyInto(key.first);
 
     // Initialize the memory using placement new.
     return new (allocator.allocate<VectorTypeStorage>())
         VectorTypeStorage(shape.size(), key.second, shape.data());
   }
 
-  ArrayRef<int> getShape() const {
-    return ArrayRef<int>(shapeElements, getSubclassData());
+  ArrayRef<int64_t> getShape() const {
+    return ArrayRef<int64_t>(shapeElements, getSubclassData());
   }
 
-  const int *shapeElements;
+  const int64_t *shapeElements;
 };
 
 struct RankedTensorTypeStorage : public VectorOrTensorTypeStorage {
   RankedTensorTypeStorage(unsigned shapeSize, Type elementTy,
-                          const int *shapeElements)
+                          const int64_t *shapeElements)
       : VectorOrTensorTypeStorage(elementTy, shapeSize),
         shapeElements(shapeElements) {}
 
   /// The hash key used for uniquing.
-  using KeyTy = std::pair<ArrayRef<int>, Type>;
+  using KeyTy = std::pair<ArrayRef<int64_t>, Type>;
   bool operator==(const KeyTy &key) const {
     return key == KeyTy(getShape(), elementType);
   }
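These storage structs all follow one pattern: the ArrayRef in the uniquing key may point at caller-owned memory, so construct() first copies the shape into the context's bump allocator, and only then builds the storage object around the long-lived pointer. A reduced sketch of that pattern with a toy allocator (BumpAllocator, copyInto, ShapeStorage, and makeStorage are invented names for this illustration, standing in for MLIR's TypeStorageAllocator machinery):

#include <cstdint>
#include <cstring>
#include <memory>
#include <vector>

struct BumpAllocator {
  std::vector<std::unique_ptr<int64_t[]>> chunks;
  // Copy the key's shape data into storage that outlives the caller.
  const int64_t *copyInto(const std::vector<int64_t> &data) {
    auto chunk = std::make_unique<int64_t[]>(data.size());
    std::memcpy(chunk.get(), data.data(), data.size() * sizeof(int64_t));
    chunks.push_back(std::move(chunk));
    return chunks.back().get();
  }
};

struct ShapeStorage {
  unsigned shapeSize;
  const int64_t *shapeElements; // points into the allocator, not the caller
};

ShapeStorage makeStorage(BumpAllocator &alloc,
                         const std::vector<int64_t> &shape) {
  return ShapeStorage{unsigned(shape.size()), alloc.copyInto(shape)};
}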
@@ -175,18 +175,18 @@ struct RankedTensorTypeStorage : public VectorOrTensorTypeStorage {
   static RankedTensorTypeStorage *construct(TypeStorageAllocator &allocator,
                                             const KeyTy &key) {
     // Copy the shape into the bump pointer.
-    ArrayRef<int> shape = allocator.copyInto(key.first);
+    ArrayRef<int64_t> shape = allocator.copyInto(key.first);
 
     // Initialize the memory using placement new.
     return new (allocator.allocate<RankedTensorTypeStorage>())
         RankedTensorTypeStorage(shape.size(), key.second, shape.data());
   }
 
-  ArrayRef<int> getShape() const {
-    return ArrayRef<int>(shapeElements, getSubclassData());
+  ArrayRef<int64_t> getShape() const {
+    return ArrayRef<int64_t>(shapeElements, getSubclassData());
   }
 
-  const int *shapeElements;
+  const int64_t *shapeElements;
 };
 
 struct UnrankedTensorTypeStorage : public VectorOrTensorTypeStorage {
@@ -203,7 +203,7 @@ struct UnrankedTensorTypeStorage : public VectorOrTensorTypeStorage {
 
 struct MemRefTypeStorage : public TypeStorage {
   MemRefTypeStorage(unsigned shapeSize, Type elementType,
-                    const int *shapeElements, const unsigned numAffineMaps,
+                    const int64_t *shapeElements, const unsigned numAffineMaps,
                     AffineMap const *affineMapList, const unsigned memorySpace)
       : TypeStorage(shapeSize), elementType(elementType),
         shapeElements(shapeElements), numAffineMaps(numAffineMaps),
@@ -212,7 +212,8 @@ struct MemRefTypeStorage : public TypeStorage {
   /// The hash key used for uniquing.
   // MemRefs are uniqued based on their shape, element type, affine map
   // composition, and memory space.
-  using KeyTy = std::tuple<ArrayRef<int>, Type, ArrayRef<AffineMap>, unsigned>;
+  using KeyTy =
+      std::tuple<ArrayRef<int64_t>, Type, ArrayRef<AffineMap>, unsigned>;
   bool operator==(const KeyTy &key) const {
     return key == KeyTy(getShape(), elementType, getAffineMaps(), memorySpace);
   }
@@ -221,7 +222,7 @@ struct MemRefTypeStorage : public TypeStorage {
   static MemRefTypeStorage *construct(TypeStorageAllocator &allocator,
                                       const KeyTy &key) {
     // Copy the shape into the bump pointer.
-    ArrayRef<int> shape = allocator.copyInto(std::get<0>(key));
+    ArrayRef<int64_t> shape = allocator.copyInto(std::get<0>(key));
 
     // Copy the affine map composition into the bump pointer.
     ArrayRef<AffineMap> affineMapComposition =
@@ -234,8 +235,8 @@ struct MemRefTypeStorage : public TypeStorage {
                           affineMapComposition.data(), std::get<3>(key));
   }
 
-  ArrayRef<int> getShape() const {
-    return ArrayRef<int>(shapeElements, getSubclassData());
+  ArrayRef<int64_t> getShape() const {
+    return ArrayRef<int64_t>(shapeElements, getSubclassData());
   }
 
   ArrayRef<AffineMap> getAffineMaps() const {
@@ -245,7 +246,7 @@ struct MemRefTypeStorage : public TypeStorage {
   /// The type of each scalar element of the memref.
   Type elementType;
   /// An array of integers which stores the shape dimension sizes.
-  const int *shapeElements;
+  const int64_t *shapeElements;
   /// The number of affine maps in the 'affineMapList' array.
   const unsigned numAffineMaps;
   /// List of affine maps in the memref's layout/index map composition.
@@ -183,7 +183,7 @@ public:
   // Type parsing.
   VectorType parseVectorType();
   ParseResult parseXInDimensionList();
-  ParseResult parseDimensionListRanked(SmallVectorImpl<int> &dimensions);
+  ParseResult parseDimensionListRanked(SmallVectorImpl<int64_t> &dimensions);
   Type parseExtendedType();
   Type parseTensorType();
   Type parseMemRefType();
@@ -386,13 +386,13 @@ VectorType Parser::parseVectorType() {
   if (getToken().isNot(Token::integer))
     return (emitError("expected dimension size in vector type"), nullptr);
 
-  SmallVector<int, 4> dimensions;
+  SmallVector<int64_t, 4> dimensions;
   while (getToken().is(Token::integer)) {
     // Make sure this integer value is in bound and valid.
     auto dimension = getToken().getUnsignedIntegerValue();
     if (!dimension.hasValue())
       return (emitError("invalid dimension in vector type"), nullptr);
-    dimensions.push_back((int)dimension.getValue());
+    dimensions.push_back((int64_t)dimension.getValue());
 
     consumeToken(Token::integer);
 
@@ -442,16 +442,17 @@ ParseResult Parser::parseXInDimensionList() {
 /// dimension-list-ranked ::= (dimension `x`)*
 /// dimension ::= `?` | integer-literal
 ///
-ParseResult Parser::parseDimensionListRanked(SmallVectorImpl<int> &dimensions) {
+ParseResult
+Parser::parseDimensionListRanked(SmallVectorImpl<int64_t> &dimensions) {
   while (getToken().isAny(Token::integer, Token::question)) {
     if (consumeIf(Token::question)) {
       dimensions.push_back(-1);
     } else {
       // Make sure this integer value is in bound and valid.
       auto dimension = getToken().getUnsignedIntegerValue();
-      if (!dimension.hasValue() || (int)dimension.getValue() < 0)
+      if (!dimension.hasValue() || (int64_t)dimension.getValue() < 0)
         return emitError("invalid dimension");
-      dimensions.push_back((int)dimension.getValue());
+      dimensions.push_back((int64_t)dimension.getValue());
       consumeToken(Token::integer);
     }
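A toy version of the grammar this function handles, operating on a pre-split token list instead of the real lexer (parseDims is a made-up helper, and error handling is reduced to an optional): each dimension is either an integer extent or `?`, and `?` is recorded as -1 in the shape vector.

#include <cstdint>
#include <optional>
#include <string>
#include <vector>

std::optional<std::vector<int64_t>>
parseDims(const std::vector<std::string> &tokens) {
  std::vector<int64_t> dims;
  for (const auto &tok : tokens) {
    if (tok == "?") {
      dims.push_back(-1); // unknown extent
    } else {
      int64_t v = std::stoll(tok); // assumes numeric tokens, unlike the lexer
      if (v < 0)
        return std::nullopt; // invalid dimension
      dims.push_back(v);
    }
  }
  return dims;
}
// parseDims({"2", "?", "8"}) yields {2, -1, 8}, as in memref<2x?x8xf32>.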
@@ -540,7 +541,7 @@ Type Parser::parseTensorType() {
     return nullptr;
 
   bool isUnranked;
-  SmallVector<int, 4> dimensions;
+  SmallVector<int64_t, 4> dimensions;
 
   if (consumeIf(Token::star)) {
     // This is an unranked tensor type.
@@ -580,7 +581,7 @@ Type Parser::parseMemRefType() {
   if (parseToken(Token::less, "expected '<' in memref type"))
     return nullptr;
 
-  SmallVector<int, 4> dimensions;
+  SmallVector<int64_t, 4> dimensions;
   if (parseDimensionListRanked(dimensions))
     return nullptr;
@@ -706,12 +707,12 @@ public:
 
   ArrayRef<Attribute> getValues() const { return storage; }
 
-  ArrayRef<int> getShape() const { return shape; }
+  ArrayRef<int64_t> getShape() const { return shape; }
 
 private:
   /// Parse either a single element or a list of elements. Return the dimensions
   /// of the parsed sub-tensor in dims.
-  ParseResult parseElementOrList(llvm::SmallVectorImpl<int> &dims);
+  ParseResult parseElementOrList(llvm::SmallVectorImpl<int64_t> &dims);
 
   /// Parse a list of either lists or elements, returning the dimensions of the
   /// parsed sub-tensors in dims. For example:
@@ -719,11 +720,11 @@ private:
   /// parseList([[1, 2], [3, 4]]) -> Success, [2, 2]
   /// parseList([[1, 2], 3]) -> Failure
   /// parseList([[1, [2, 3]], [4, [5]]]) -> Failure
-  ParseResult parseList(llvm::SmallVectorImpl<int> &dims);
+  ParseResult parseList(llvm::SmallVectorImpl<int64_t> &dims);
 
   Parser &p;
   Type eltTy;
-  SmallVector<int, 4> shape;
+  SmallVector<int64_t, 4> shape;
   std::vector<Attribute> storage;
 };
 } // namespace
@@ -731,7 +732,7 @@ private:
 /// Parse either a single element or a list of elements. Return the dimensions
 /// of the parsed sub-tensor in dims.
 ParseResult
-TensorLiteralParser::parseElementOrList(llvm::SmallVectorImpl<int> &dims) {
+TensorLiteralParser::parseElementOrList(llvm::SmallVectorImpl<int64_t> &dims) {
   switch (p.getToken().getKind()) {
   case Token::l_square:
     return parseList(dims);
@@ -789,11 +790,12 @@ TensorLiteralParser::parseElementOrList(llvm::SmallVectorImpl<int> &dims) {
 /// parseList([[1, 2], [3, 4]]) -> Success, [2, 2]
 /// parseList([[1, 2], 3]) -> Failure
 /// parseList([[1, [2, 3]], [4, [5]]]) -> Failure
-ParseResult TensorLiteralParser::parseList(llvm::SmallVectorImpl<int> &dims) {
+ParseResult
+TensorLiteralParser::parseList(llvm::SmallVectorImpl<int64_t> &dims) {
   p.consumeToken(Token::l_square);
 
-  auto checkDims = [&](const llvm::SmallVectorImpl<int> &prevDims,
-                       const llvm::SmallVectorImpl<int> &newDims) {
+  auto checkDims = [&](const llvm::SmallVectorImpl<int64_t> &prevDims,
+                       const llvm::SmallVectorImpl<int64_t> &newDims) {
     if (prevDims == newDims)
       return ParseSuccess;
     return p.emitError("tensor literal is invalid; ranks are not consistent "
@@ -801,10 +803,10 @@ ParseResult TensorLiteralParser::parseList(llvm::SmallVectorImpl<int> &dims) {
   };
 
   bool first = true;
-  llvm::SmallVector<int, 4> newDims;
+  llvm::SmallVector<int64_t, 4> newDims;
   unsigned size = 0;
   auto parseCommaSeparatedList = [&]() {
-    llvm::SmallVector<int, 4> thisDims;
+    llvm::SmallVector<int64_t, 4> thisDims;
     if (parseElementOrList(thisDims))
       return ParseFailure;
     ++size;
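A reduced model of the consistency rule these tensor-literal hunks implement: sibling sub-lists must produce identical dimension vectors, and each list level prepends its own length as the leading dimension. Node and inferShape are invented for this sketch; the real parser works over tokens, not a prebuilt tree.

#include <cstdint>
#include <optional>
#include <vector>

struct Node {
  std::vector<Node> children; // empty => scalar element
};

std::optional<std::vector<int64_t>> inferShape(const Node &n) {
  if (n.children.empty())
    return std::vector<int64_t>{}; // scalar: rank 0
  auto first = inferShape(n.children.front());
  if (!first)
    return std::nullopt;
  for (const auto &c : n.children)
    if (inferShape(c) != first)
      return std::nullopt; // ranks/extents not consistent across siblings
  std::vector<int64_t> dims{int64_t(n.children.size())};
  dims.insert(dims.end(), first->begin(), first->end());
  return dims;
}
// [[1, 2], [3, 4]] infers {2, 2}; [[1, 2], 3] infers nothing (failure),
// matching the parseList doc examples above.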
@@ -258,14 +258,14 @@ struct SimplifyAllocConst : public RewritePattern {
 
     // Ok, we have one or more constant operands. Collect the non-constant ones
     // and keep track of the resultant memref type to build.
-    SmallVector<int, 4> newShapeConstants;
+    SmallVector<int64_t, 4> newShapeConstants;
     newShapeConstants.reserve(memrefType.getRank());
     SmallVector<Value *, 4> newOperands;
     SmallVector<Value *, 4> droppedOperands;
 
     unsigned dynamicDimPos = 0;
     for (unsigned dim = 0, e = memrefType.getRank(); dim < e; ++dim) {
-      int dimSize = memrefType.getDimSize(dim);
+      int64_t dimSize = memrefType.getDimSize(dim);
       // If this is already static dimension, keep it.
       if (dimSize != -1) {
         newShapeConstants.push_back(dimSize);
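The surrounding pattern walks the shape once, keeping static extents and substituting known constants for dynamic ones. A reduced model with the operand list flattened to optional constants (foldConstantDims is invented; the real pattern also rebuilds the alloc's operand list):

#include <cstdint>
#include <optional>
#include <vector>

std::vector<int64_t>
foldConstantDims(const std::vector<int64_t> &shape,
                 const std::vector<std::optional<int64_t>> &dynOperands) {
  std::vector<int64_t> newShape;
  newShape.reserve(shape.size());
  unsigned dynamicDimPos = 0;
  for (int64_t dimSize : shape) {
    if (dimSize != -1) {
      newShape.push_back(dimSize); // already static: keep it
      continue;
    }
    auto c = dynOperands[dynamicDimPos++]; // one operand per dynamic dim
    newShape.push_back(c ? *c : -1);       // fold if constant, else stay dynamic
  }
  return newShape;
}
// foldConstantDims({-1, 8}, {std::optional<int64_t>(4)}) yields {4, 8}.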
@@ -794,7 +794,7 @@ Attribute DimOp::constantFold(ArrayRef<Attribute> operands,
                               MLIRContext *context) const {
   // Constant fold dim when the size along the index referred to is a constant.
   auto opType = getOperand()->getType();
-  int indexSize = -1;
+  int64_t indexSize = -1;
   if (auto tensorType = opType.dyn_cast<RankedTensorType>()) {
     indexSize = tensorType.getShape()[getIndex()];
   } else if (auto memrefType = opType.dyn_cast<MemRefType>()) {
@@ -1268,7 +1268,7 @@ bool MemRefCastOp::verify() const {
     return emitOpError("requires input and result ranks to match");
 
   for (unsigned i = 0, e = opType.getRank(); i != e; ++i) {
-    int opDim = opType.getDimSize(i), resultDim = resType.getDimSize(i);
+    int64_t opDim = opType.getDimSize(i), resultDim = resType.getDimSize(i);
     if (opDim != -1 && resultDim != -1 && opDim != resultDim)
       return emitOpError("requires static dimensions to match");
   }
@@ -1628,7 +1628,7 @@ bool TensorCastOp::verify() const {
     return emitOpError("requires input and result ranks to match");
 
   for (unsigned i = 0, e = opRType.getRank(); i != e; ++i) {
-    int opDim = opRType.getDimSize(i), resultDim = resRType.getDimSize(i);
+    int64_t opDim = opRType.getDimSize(i), resultDim = resRType.getDimSize(i);
     if (opDim != -1 && resultDim != -1 && opDim != resultDim)
       return emitOpError("requires static dimensions to match");
   }
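Both verify() hunks apply the same dimension rule. A standalone statement of it (castShapesCompatible is a made-up name): two shapes of equal rank are cast-compatible if every pair of extents matches wherever both are static, with -1 (unknown) matching anything.

#include <cstdint>
#include <vector>

bool castShapesCompatible(const std::vector<int64_t> &a,
                          const std::vector<int64_t> &b) {
  if (a.size() != b.size())
    return false; // ranks must match
  for (size_t i = 0; i < a.size(); ++i)
    if (a[i] != -1 && b[i] != -1 && a[i] != b[i])
      return false; // both static and different
  return true;
}
// {2, -1} vs {2, 8}: compatible; {2, 4} vs {2, 8}: not.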
@@ -484,7 +484,7 @@ bool VectorTypeCastOp::verify() const {
   if (!dstVectorType)
     return emitOpError(
         "expects vector as an element of the target memref type");
-  if (llvm::any_of(dstMemrefType.getShape(), [](int s) { return s == -1; }))
+  if (!dstMemrefType.hasStaticShape())
     return emitOpError("does not support dynamic shapes");
 
   if (!getOperand()->getType().isa<MemRefType>())
@@ -360,7 +360,7 @@ llvm::Value *ModuleLowerer::emitMemRefAlloc(ConstOpPointer<AllocOp> allocOp) {
   SmallVector<llvm::Value *, 4> sizes;
   sizes.reserve(allocOp->getNumOperands());
   unsigned i = 0;
-  for (int s : type.getShape()) {
+  for (int64_t s : type.getShape()) {
     llvm::Value *value = (s == -1)
                              ? valueMapping.lookup(allocOp->getOperand(i++))
                              : getIndexConstant(s);
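A reduced model of the size walk above, with llvm::Value* replaced by plain integers (totalElements is invented for the sketch): static extents come straight from the shape, and each -1 consumes the next dynamic-size operand in order.

#include <cstdint>
#include <vector>

int64_t totalElements(const std::vector<int64_t> &shape,
                      const std::vector<int64_t> &dynOperands) {
  int64_t total = 1;
  unsigned i = 0;
  for (int64_t s : shape)
    total *= (s == -1) ? dynOperands[i++] : s; // dynamic dims pull an operand
  return total;
}
// totalElements({2, -1, 8}, {5}) == 80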
@@ -114,7 +114,7 @@ struct StrideInfo {
 /// successively nested.
 // TODO(bondhugula): make this work with non-identity layout maps.
 static void getMultiLevelStrides(const MemRefRegion &region,
-                                 ArrayRef<int> bufferShape,
+                                 ArrayRef<int64_t> bufferShape,
                                  SmallVectorImpl<StrideInfo> *strideInfos) {
   if (bufferShape.size() <= 1)
     return;
@@ -122,7 +122,7 @@ static void getMultiLevelStrides(const MemRefRegion &region,
   int64_t numEltPerStride = 1;
   int64_t stride = 1;
   for (int d = bufferShape.size() - 1; d >= 1; d--) {
-    int dimSize = region.memref->getType().cast<MemRefType>().getDimSize(d);
+    int64_t dimSize = region.memref->getType().cast<MemRefType>().getDimSize(d);
     stride *= dimSize;
     numEltPerStride *= bufferShape[d];
     // A stride is needed only if the region has a shorter extent than the
@@ -169,7 +169,7 @@ bool DmaGeneration::generateDma(const MemRefRegion &region, ForInst *forInst,
   Value *zeroIndex = top.create<ConstantIndexOp>(loc, 0);
 
   unsigned rank = memRefType.getRank();
-  SmallVector<int, 4> fastBufferShape;
+  SmallVector<int64_t, 4> fastBufferShape;
 
   // Compute the extents of the buffer.
   std::vector<SmallVector<int64_t, 4>> lbs;
@@ -711,7 +711,7 @@ static Value *createPrivateMemRef(ForInst *forInst,
   // Compute MemRefRegion for 'srcStoreOpInst' at depth 'dstLoopDepth'.
   MemRefRegion region;
   getMemRefRegion(srcStoreOpInst, dstLoopDepth, &region);
-  SmallVector<int, 4> newShape;
+  SmallVector<int64_t, 4> newShape;
   std::vector<SmallVector<int64_t, 4>> lbs;
   SmallVector<int64_t, 8> lbDivisors;
   lbs.reserve(rank);
@@ -96,9 +96,9 @@ private:
   MLFuncGlobalLoweringState *state;
 
   MemRefType memrefType;
-  ArrayRef<int> memrefShape;
+  ArrayRef<int64_t> memrefShape;
   VectorType vectorType;
-  ArrayRef<int> vectorShape;
+  ArrayRef<int64_t> vectorShape;
   AffineMap permutationMap;
 
   /// Used for staging the transfer in a local scalar buffer.
@@ -232,9 +232,9 @@ VectorTransferRewriter<VectorTransferOpTy>::makeVectorTransferAccessInfo() {
   }
   emitter
       .template bindZipRangeConstants<ConstantIndexOp>(
-          llvm::zip(lbs, SmallVector<int, 8>(ivs.size(), 0)))
+          llvm::zip(lbs, SmallVector<int64_t, 8>(ivs.size(), 0)))
       .template bindZipRangeConstants<ConstantIndexOp>(
-          llvm::zip(steps, SmallVector<int, 8>(ivs.size(), 1)));
+          llvm::zip(steps, SmallVector<int64_t, 8>(ivs.size(), 1)));
 
   return VectorTransferAccessInfo{ivs,
                                   makeExprs(lbs),
@@ -187,7 +187,7 @@ struct MaterializationState {
   MaterializationState() : hwVectorSize(clVectorSize.size(), 0) {
     std::copy(clVectorSize.begin(), clVectorSize.end(), hwVectorSize.begin());
   }
-  SmallVector<int, 8> hwVectorSize;
+  SmallVector<int64_t, 8> hwVectorSize;
   VectorType superVectorType;
   VectorType hwVectorType;
   SmallVector<unsigned, 8> hwVectorInstance;
@@ -458,7 +458,7 @@ static AffineMap projectedPermutationMap(VectorTransferOpTy *transfer,
   SmallVector<AffineExpr, 4> keep;
   MLIRContext *context = transfer->getInstruction()->getContext();
   functional::zipApply(
-      [&dim, &keep, context](int shape, int ratio) {
+      [&dim, &keep, context](int64_t shape, int64_t ratio) {
         assert(shape >= ratio && "shape dim must be greater than ratio dim");
         if (shape != ratio) {
           // HW vector is not full instantiated along this dim, keep it.
@@ -87,8 +87,8 @@ static bool doubleBuffer(Value *oldMemRef, ForInst *forInst) {
   // Doubles the shape with a leading dimension extent of 2.
   auto doubleShape = [&](MemRefType oldMemRefType) -> MemRefType {
     // Add the leading dimension in the shape for the double buffer.
-    ArrayRef<int> oldShape = oldMemRefType.getShape();
-    SmallVector<int, 4> newShape(1 + oldMemRefType.getRank());
+    ArrayRef<int64_t> oldShape = oldMemRefType.getShape();
+    SmallVector<int64_t, 4> newShape(1 + oldMemRefType.getRank());
     newShape[0] = 2;
     std::copy(oldShape.begin(), oldShape.end(), newShape.begin() + 1);
     auto newMemRefType =
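The shape manipulation in that lambda is self-contained enough to restate over std types (illustration only): the new shape is the old one with a leading extent of 2 prepended, so two copies of the buffer can be ping-ponged across pipeline stages.

#include <algorithm>
#include <cstdint>
#include <vector>

std::vector<int64_t> doubleShapeSketch(const std::vector<int64_t> &oldShape) {
  std::vector<int64_t> newShape(1 + oldShape.size());
  newShape[0] = 2; // leading dimension for the two buffer copies
  std::copy(oldShape.begin(), oldShape.end(), newShape.begin() + 1);
  return newShape;
}
// doubleShapeSketch({128, 64}) == {2, 128, 64}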
@@ -101,8 +101,8 @@ char VectorizerTestPass::passID = 0;
 
 void VectorizerTestPass::testVectorShapeRatio(Function *f) {
   using matcher::Op;
-  SmallVector<int, 8> shape(clTestVectorShapeRatio.begin(),
-                            clTestVectorShapeRatio.end());
+  SmallVector<int64_t, 8> shape(clTestVectorShapeRatio.begin(),
+                                clTestVectorShapeRatio.end());
   auto subVectorType = VectorType::get(shape, Type::getF32(f->getContext()));
   // Only filter instructions that operate on a strict super-vector and have one
   // return. This makes testing easier.
@@ -667,7 +667,7 @@ char Vectorize::passID = 0;
 namespace {
 
 struct VectorizationStrategy {
-  ArrayRef<int> vectorSizes;
+  SmallVector<int64_t, 8> vectorSizes;
   DenseMap<ForInst *, unsigned> loopToVectorDim;
 };
@@ -1280,7 +1280,8 @@ PassResult Vectorize::runOnFunction(Function *f) {
   for (auto m : matches) {
     VectorizationStrategy strategy;
     // TODO(ntv): depending on profitability, elect to reduce the vector size.
-    strategy.vectorSizes = clVirtualVectorSize;
+    strategy.vectorSizes.assign(clVirtualVectorSize.begin(),
+                                clVirtualVectorSize.end());
     auto fail = analyzeProfitability(m.second, 1, patternDepth, &strategy);
     if (fail) {
       continue;