[mlir] Flip LinAlg dialect to _Both
This one required more changes than ideal due to an overlapping generated name with a different return type: the interface's getIndexingMaps collided with the accessor that prefixing generates for the indexing_maps attribute. Renamed the interface method getIndexingMaps to getIndexingMapsArray to move it out of the way and to highlight that it (more expensively) returns a SmallVector, freeing the prefixed name getIndexingMaps for the Attribute accessor.

Differential Revision: https://reviews.llvm.org/D129919
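In practice the flip leaves structured ops with two map accessors of different cost. A minimal sketch of the distinction, assuming an MLIR checkout at this revision (helper names are illustrative only):

#include "mlir/Dialect/Linalg/IR/Linalg.h"

using namespace mlir;

// Cheap: returns the underlying ArrayAttr unchanged; this is the prefixed
// spelling of the old indexing_maps() interface method.
static ArrayAttr indexingMapsAttr(linalg::LinalgOp op) {
  return op.getIndexingMaps();
}

// More expensive: materializes a fresh SmallVector<AffineMap> from the
// attribute on every call, which is what the new name is meant to signal.
static SmallVector<AffineMap> indexingMapsVector(linalg::LinalgOp op) {
  return op.getIndexingMapsArray();
}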
This commit is contained in:
parent 95401b0153
commit d2c0572b2e
@@ -58,9 +58,7 @@ def Linalg_Dialect : Dialect {
     llvm::StringMap<RegionBuilderFunType> namedStructuredOpRegionBuilders;
   }];

-  // TODO: This has overlapping accessors with generated when switched to
-  // prefixed. Fix and update to _Both & then _Prefixed.
-  let emitAccessorPrefix = kEmitAccessorPrefix_Raw;
+  let emitAccessorPrefix = kEmitAccessorPrefix_Both;
 }

 // Define the function attribute enums matching the OpDSL functions.
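For context, kEmitAccessorPrefix_Both asks ODS to generate both the raw and the get-prefixed accessor for each attribute and operand while call sites migrate; _Prefixed later drops the raw form. A hedged sketch of the two spellings this produces, using linalg.generic's iterator_types attribute as the example:

#include "mlir/Dialect/Linalg/IR/Linalg.h"

#include <cassert>

// Under _Both, both spellings resolve to the same underlying attribute.
static void bothSpellings(mlir::linalg::GenericOp op) {
  mlir::ArrayAttr raw = op.iterator_types();        // legacy accessor
  mlir::ArrayAttr prefixed = op.getIteratorTypes(); // prefixed accessor
  assert(raw == prefixed && "one attribute, two accessor names");
}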
@@ -61,7 +61,7 @@ def LinalgContractionOpInterface : OpInterface<"ContractionOpInterface"> {
       /*methodName=*/"isRowMajorMatmul",
       /*args=*/(ins),
       /*methodBody=*/[{
-        return mlir::isRowMajorMatmul($_op.indexing_maps());
+        return mlir::isRowMajorMatmul($_op.getIndexingMaps());
       }]>,
     InterfaceMethod<
       /*desc=*/[{
@@ -72,7 +72,7 @@ def LinalgContractionOpInterface : OpInterface<"ContractionOpInterface"> {
       /*methodName=*/"isColumnMajorMatmul",
       /*args=*/(ins),
       /*methodBody=*/[{
-        return mlir::isColumnMajorMatmul($_op.indexing_maps());
+        return mlir::isColumnMajorMatmul($_op.getIndexingMaps());
       }]>,
     InterfaceMethod<
       /*desc=*/[{
@@ -83,7 +83,7 @@ def LinalgContractionOpInterface : OpInterface<"ContractionOpInterface"> {
       /*methodName=*/"isRowMajorBatchMatmul",
       /*args=*/(ins),
       /*methodBody=*/[{
-        return mlir::isRowMajorBatchMatmul($_op.indexing_maps());
+        return mlir::isRowMajorBatchMatmul($_op.getIndexingMaps());
       }]>,
   ];
 }
@@ -724,7 +724,7 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
       /*defaultImplementation=*/[{
         assert(opOperand->getOwner() == this->getOperation());
         auto indexingMaps =
-          $_op.indexing_maps().template getAsValueRange<AffineMapAttr>();
+          $_op.getIndexingMaps().template getAsValueRange<AffineMapAttr>();
         return *(indexingMaps.begin() + opOperand->getOperandNumber());
       }]
     >,
@@ -739,7 +739,7 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
       /*defaultImplementation=*/[{
         assert(result.getOwner() == this->getOperation());
         auto indexingMaps =
-          $_op.indexing_maps().template getAsValueRange<AffineMapAttr>();
+          $_op.getIndexingMaps().template getAsValueRange<AffineMapAttr>();
         return *(indexingMaps.begin() + getNumInputs() +
                  result.getResultNumber());
       }]
@@ -836,18 +836,18 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
         Return the indexing maps attribute within the current operation.
       }],
       /*retTy=*/"ArrayAttr",
-      /*methodName=*/"indexing_maps"
+      /*methodName=*/"getIndexingMaps"
     >,
     InterfaceMethod<
       /*desc=*/[{
         Return the indexing maps within the current operation.
       }],
       /*retTy=*/"SmallVector<AffineMap>",
-      /*methodName=*/"getIndexingMaps",
+      /*methodName=*/"getIndexingMapsArray",
       /*args=*/(ins),
       /*methodBody=*/"",
       /*defaultImplementation=*/[{
-        auto range = $_op.indexing_maps()
+        auto range = $_op.getIndexingMaps()
                          .template getAsValueRange<AffineMapAttr>();
         return {range.begin(), range.end()};
       }]
@@ -942,9 +942,7 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
       /*args=*/(ins),
       /*methodBody=*/"",
       /*defaultImplementation=*/[{
-        auto r = $_op.indexing_maps().template getAsRange<AffineMapAttr>();
-        auto maps = llvm::to_vector<8>(
-            llvm::map_range(r, [](AffineMapAttr a) { return a.getValue(); }));
+        auto maps = $_op.getIndexingMapsArray();
         return concatAffineMaps(maps);
       }]
     >,
@@ -1126,7 +1124,7 @@ def LinalgStructuredInterface : OpInterface<"LinalgOp"> {
       /*methodName=*/"hasOnlyProjectedPermutations",
       (ins),
       [{
-        return llvm::all_of($_op.getIndexingMaps(),
+        return llvm::all_of($_op.getIndexingMapsArray(),
                             [](AffineMap map) { return map.isProjectedPermutation(); });
       }]
     >
@@ -50,7 +50,7 @@ def Linalg_InitTensorOp : Linalg_Op<"init_tensor",
   }];

   let extraClassDeclaration = [{
-    static StringRef getStaticSizesAttrName() {
+    static StringRef getStaticSizesAttrStrName() {
       return "static_sizes";
     }

@@ -163,7 +163,8 @@ public:

   StructuredGenerator(OpBuilder &builder, StructuredOpInterface op)
       : builder(builder), ctx(op.getContext()), loc(op.getLoc()),
-        iterators(op.getIteratorTypes()), maps(op.getIndexingMaps()), op(op) {}
+        iterators(op.getIteratorTypes()), maps(op.getIndexingMapsArray()),
+        op(op) {}

   bool iters(ArrayRef<IteratorType> its) {
     if (its.size() != iterators.size())
@@ -69,15 +69,6 @@ def Vector_CombiningKindAttr : DialectAttr<
     "::mlir::vector::CombiningKindAttr::get($0, $_builder.getContext())";
 }

-def Vector_AffineMapArrayAttr : TypedArrayAttrBase<AffineMapAttr,
-    "AffineMap array attribute"> {
-  let returnType = [{ ::llvm::SmallVector<::mlir::AffineMap, 4> }];
-  let convertFromStorage = [{
-      llvm::to_vector<4>($_self.getAsValueRange<::mlir::AffineMapAttr>());
-  }];
-  let constBuilderCall = "$_builder.getAffineMapArrayAttr($0)";
-}
-
 // TODO: Add an attribute to specify a different algebra with operators other
 // than the current set: {*, +}.
 def Vector_ContractionOp :
@@ -90,7 +81,7 @@ def Vector_ContractionOp :
     ]>,
     Arguments<(ins AnyVector:$lhs, AnyVector:$rhs, AnyType:$acc,
                Variadic<VectorOf<[I1]>>:$masks,
-               Vector_AffineMapArrayAttr:$indexing_maps,
+               ArrayAttr:$indexing_maps,
                ArrayAttr:$iterator_types,
                DefaultValuedAttr<Vector_CombiningKindAttr,
                                  "CombiningKind::ADD">:$kind)>,
@@ -241,6 +232,10 @@ def Vector_ContractionOp :
     ArrayRef<StringRef> getTraitAttrNames();
     static unsigned getAccOperandIndex() { return 2; }

+    llvm::SmallVector<::mlir::AffineMap, 4> getIndexingMapsArray() {
+      return llvm::to_vector<4>(getIndexingMaps().getAsValueRange<::mlir::AffineMapAttr>());
+    }
+
     // Returns the bounds of each dimension in the iteration space spanned
     // by the iterator types of this operation.
     void getIterationBounds(SmallVectorImpl<int64_t> &iterationBounds);
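With Vector_AffineMapArrayAttr gone, the generated getIndexingMaps() on vector.contract returns the plain ArrayAttr, and the hand-written helper above recovers the SmallVector form. A small usage sketch (function name hypothetical, mirroring the call sites later in this diff):

#include "mlir/Dialect/Vector/IR/VectorOps.h"

#include <cassert>

static void inspectContraction(mlir::vector::ContractionOp op) {
  // Raw attribute, cheap to read.
  mlir::ArrayAttr attr = op.getIndexingMaps();
  // AffineMaps materialized from the attribute on each call.
  llvm::SmallVector<mlir::AffineMap, 4> maps = op.getIndexingMapsArray();
  assert(maps.size() == attr.size() && "one map per lhs/rhs/acc operand");
}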
@@ -87,20 +87,19 @@ SingleWorkgroupReduction::matchAsPerformingReduction(
   if (!genericOp.hasSingleReductionLoop())
     return llvm::None;

-  if (genericOp.indexing_maps().getValue().size() != 2)
+  auto indexingMaps = genericOp.getIndexingMapsArray();
+  if (indexingMaps.size() != 2)
     return llvm::None;

   // TODO: create utility functions for these checks in Linalg
   // and use them.
-  auto inputMap = genericOp.indexing_maps().getValue()[0].cast<AffineMapAttr>();
-  auto outputMap =
-      genericOp.indexing_maps().getValue()[1].cast<AffineMapAttr>();
+  auto inputMap = indexingMaps[0];
+  auto outputMap = indexingMaps[1];
   // The indexing map for the input should be `(i) -> (i)`.
-  if (inputMap.getValue() !=
-      AffineMap::get(1, 0, getAffineDimExpr(0, op->getContext())))
+  if (inputMap != AffineMap::get(1, 0, getAffineDimExpr(0, op->getContext())))
     return llvm::None;
   // The indexing map for the input should be `(i) -> (0)`.
-  if (outputMap.getValue() !=
+  if (outputMap !=
       AffineMap::get(1, 0, getAffineConstantExpr(0, op->getContext())))
     return llvm::None;

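The array form yields AffineMap values rather than AffineMapAttr, which is what lets the matcher above compare maps directly instead of unwrapping them with getValue(). A minimal sketch of the same identity-map check (helper name hypothetical):

#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"

// True if the op's first indexing map is the 1-D identity (i) -> (i).
static bool firstMapIsIdentity(mlir::linalg::GenericOp op) {
  llvm::SmallVector<mlir::AffineMap> maps = op.getIndexingMapsArray();
  return !maps.empty() &&
         maps[0] == mlir::AffineMap::get(
                        /*dimCount=*/1, /*symbolCount=*/0,
                        mlir::getAffineDimExpr(0, op.getContext()));
}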
@@ -290,7 +290,7 @@ PrepareContractToGPUMMASync::matchAndRewrite(vector::ContractionOp op,
   bindDims(rewriter.getContext(), m, n, k);
   static constexpr std::array<int64_t, 2> perm = {1, 0};
   auto iteratorTypes = op.getIteratorTypes().getValue();
-  SmallVector<AffineMap, 4> maps = op.getIndexingMaps();
+  SmallVector<AffineMap, 4> maps = op.getIndexingMapsArray();
   if (iteratorTypes.size() != 3)
     return failure();
   if (!(isParallelIterator(iteratorTypes[0]) &&
@@ -78,9 +78,10 @@ static bool contractSupportsMMAMatrixType(vector::ContractionOp contract,
   // The contract needs to represent a matmul to be able to convert to
   // MMAMatrix matmul.
   if (!useNvGpu &&
-      contract.getIndexingMaps() != infer({{m, k}, {k, n}, {m, n}}))
+      contract.getIndexingMapsArray() != infer({{m, k}, {k, n}, {m, n}}))
     return false;
-  if (useNvGpu && contract.getIndexingMaps() != infer({{m, k}, {n, k}, {m, n}}))
+  if (useNvGpu &&
+      contract.getIndexingMapsArray() != infer({{m, k}, {n, k}, {m, n}}))
     return false;

   return true;
@@ -290,7 +291,7 @@ struct PrepareContractToGPUMMA
   bindDims(rewriter.getContext(), m, n, k);
   static constexpr std::array<int64_t, 2> perm = {1, 0};
   auto iteratorTypes = op.getIteratorTypes().getValue();
-  SmallVector<AffineMap, 4> maps = op.getIndexingMaps();
+  SmallVector<AffineMap, 4> maps = op.getIndexingMapsArray();
   if (!(isParallelIterator(iteratorTypes[0]) &&
         isParallelIterator(iteratorTypes[1]) &&
         isReductionIterator(iteratorTypes[2])))
@@ -120,7 +120,7 @@ static MatchContractionResult isContractionInterfaceImpl(Operation *op) {
     return MatchContractionResult::NotLinalgOp;
   if (linalgOp.getNumInputs() != 2 || linalgOp.getNumOutputs() != 1)
     return MatchContractionResult::WrongNumOperands;
-  auto mapRange = linalgOp.indexing_maps().getAsValueRange<AffineMapAttr>();
+  auto mapRange = linalgOp.getIndexingMapsArray();
   if (linalgOp.getNumReductionLoops() == 0)
     return MatchContractionResult::NoReduction;
   if (llvm::any_of(mapRange,
@@ -280,7 +280,7 @@ static MatchConvolutionResult isConvolutionInterfaceImpl(Operation *op) {
   if (linalgOp.getNumInputs() < 2 || linalgOp.getNumOutputs() != 1)
     return MatchConvolutionResult::WrongNumOperands;

-  auto indexingMaps = linalgOp.getIndexingMaps();
+  auto indexingMaps = linalgOp.getIndexingMapsArray();

   // Check the input indexing map has the right form.
   ConvAccessExprWalker inputExprWalker;
@@ -645,10 +645,10 @@ LogicalResult mlir::linalg::detail::verifyStructuredOpInterface(Operation *op) {
     return failure();

   // All input/output operands must be indexed.
-  if (static_cast<int64_t>(linalgOp.indexing_maps().size()) !=
+  if (static_cast<int64_t>(linalgOp.getIndexingMapsArray().size()) !=
       linalgOp.getNumInputsAndOutputs())
     return op->emitOpError("expected the number of indexing_map (")
-           << linalgOp.indexing_maps().size()
+           << linalgOp.getIndexingMapsArray().size()
            << ") to be equal to the number of input/output operands ("
            << linalgOp.getNumInputsAndOutputs() << ")";

@@ -1107,7 +1107,7 @@ struct EraseIdentityGenericOp : public OpRewritePattern<GenericOp> {
   LogicalResult matchAndRewrite(GenericOp genericOp,
                                 PatternRewriter &rewriter) const override {
     // Check all indexing maps are identity.
-    if (llvm::any_of(genericOp.getIndexingMaps(),
+    if (llvm::any_of(genericOp.getIndexingMapsArray(),
                      [](AffineMap map) { return !map.isIdentity(); }))
       return failure();

@@ -1854,7 +1854,7 @@ struct InferStaticShapeOfOperands : public OpInterfaceRewritePattern<LinalgOp> {
       return failure();

     // Maps must be projected permutations.
-    if (llvm::any_of(linalgOp.getIndexingMaps(), [](AffineMap map) {
+    if (llvm::any_of(linalgOp.getIndexingMapsArray(), [](AffineMap map) {
           return !map.isProjectedPermutation();
         }))
       return failure();

@@ -91,7 +91,7 @@ public:
     // entirely in the compiler, without needing to turn all indices into
     // Values, and then do affine apply on them, and then match back the
     // constant again.
-    if (!llvm::all_of(genericOp.getIndexingMaps(),
+    if (!llvm::all_of(genericOp.getIndexingMapsArray(),
                       [](AffineMap map) { return map.isPermutation(); }))
       return failure();

@@ -155,8 +155,8 @@ public:

     SmallVector<SmallVector<unsigned>> inputDims;
     for (int i = 0; i < numInputs; ++i)
-      inputDims.push_back(getDimPositions(genericOp.getIndexingMaps()[i]));
-    auto outputDims = getDimPositions(genericOp.getIndexingMaps().back());
+      inputDims.push_back(getDimPositions(genericOp.getIndexingMapsArray()[i]));
+    auto outputDims = getDimPositions(genericOp.getIndexingMapsArray().back());
     auto outputShape = outputType.getShape();

     // Allocate small vectors for index delinearization. Initial values do not
@@ -268,7 +268,7 @@ struct FoldConstantTranspose : public FoldConstantBase<FoldConstantTranspose> {

   bool matchIndexingMaps(GenericOp genericOp) const {
     // We should have one input and one output.
-    return genericOp.getIndexingMaps().size() == 2;
+    return genericOp.getIndexingMapsArray().size() == 2;
   }

   RegionComputationFn getRegionComputeFn(GenericOp genericOp) const {
@@ -145,7 +145,7 @@ DecomposeLinalgOp::createPeeledGenericOp(GenericOp genericOp,
   Block *body = genericOp.getBody();
   Operation *peeledScalarOperation = &(*body->begin());
   SmallVector<AffineMap> peeledGenericOpIndexingMaps =
-      genericOp.getIndexingMaps();
+      genericOp.getIndexingMapsArray();

   /// Compute the loop ranges for operation. This is the shape of the result of
   /// the generic op for the peeled operation.
@@ -171,7 +171,7 @@ struct FoldUnitDimLoops : public OpRewritePattern<GenericOp> {
   using OpRewritePattern<GenericOp>::OpRewritePattern;
   LogicalResult matchAndRewrite(GenericOp genericOp,
                                 PatternRewriter &rewriter) const override {
-    SmallVector<AffineMap, 4> indexingMaps = genericOp.getIndexingMaps();
+    SmallVector<AffineMap, 4> indexingMaps = genericOp.getIndexingMapsArray();
     if (indexingMaps.empty())
       return failure();

@@ -112,7 +112,7 @@ static bool areElementwiseOpsFusable(GenericOp producer, GenericOp consumer,
   };

   for (auto pair :
-       llvm::zip(consumer->getOperands(), consumer.getIndexingMaps())) {
+       llvm::zip(consumer->getOperands(), consumer.getIndexingMapsArray())) {
     Value operand = std::get<0>(pair);
     if (operand == consumerOpOperand->get())
       continue;
@@ -709,7 +709,7 @@ fuseWithReshapeByExpansion(GenericOp genericOp, Operation *reshapeOp,
     return llvm::None;

   SmallVector<AffineMap, 4> expandedOpIndexingMaps = llvm::to_vector<4>(
-      llvm::map_range(genericOp.getIndexingMaps(), [&](AffineMap m) {
+      llvm::map_range(genericOp.getIndexingMapsArray(), [&](AffineMap m) {
         return getIndexingMapInExpandedOp(rewriter, m, expansionInfo);
       }));

@@ -1008,7 +1008,7 @@ getCollapsableIterationSpaceDims(GenericOp genericOp, OpOperand *fusableOperand,
   if (!genericOp.hasTensorSemantics() || genericOp.getNumOutputs() != 1)
     return {};

-  if (!llvm::all_of(genericOp.getIndexingMaps(), [](AffineMap map) {
+  if (!llvm::all_of(genericOp.getIndexingMapsArray(), [](AffineMap map) {
        return map.isProjectedPermutation();
      })) {
    return {};
@@ -1085,9 +1085,11 @@ getCollapsableIterationSpaceDims(GenericOp genericOp, OpOperand *fusableOperand,
     }

     // Check that the sequence is preserved in all indexing maps.
-    if (llvm::any_of(genericOp.getIndexingMaps(), [&](AffineMap indexingMap) {
-          return !isDimSequencePreserved(indexingMap, foldedIterationSpaceDims);
-        }))
+    if (llvm::any_of(genericOp.getIndexingMapsArray(),
+                     [&](AffineMap indexingMap) {
+                       return !isDimSequencePreserved(indexingMap,
+                                                      foldedIterationSpaceDims);
+                     }))
       continue;

     processedIterationDims.insert(foldedIterationSpaceDims.begin(),
@@ -1350,7 +1352,7 @@ static FailureOr<SmallVector<Value>> collapseGenericOpIterationDims(

   // Get the indexing maps.
   auto indexingMaps = llvm::to_vector(
-      llvm::map_range(genericOp.getIndexingMaps(), [&](AffineMap map) {
+      llvm::map_range(genericOp.getIndexingMapsArray(), [&](AffineMap map) {
         return getCollapsedOpIndexingMap(map, collapsingInfo);
       }));

@@ -46,7 +46,7 @@ FailureOr<GenericOp> mlir::linalg::generalizeNamedOp(RewriterBase &rewriter,

   SmallVector<Value> inputOperands = linalgOp.getInputOperands();
   SmallVector<Value> outputOperands = linalgOp.getOutputOperands();
-  SmallVector<AffineMap> indexingMaps = linalgOp.getIndexingMaps();
+  SmallVector<AffineMap> indexingMaps = linalgOp.getIndexingMapsArray();
   SmallVector<StringRef> iterators = llvm::to_vector<4>(
       linalgOp.iterator_types().getAsValueRange<StringAttr>());
   SmallVector<RankedTensorType> resultTypes = linalgOp.getOutputTensorTypes();
@@ -77,7 +77,7 @@ template <typename SingleInputPoolingOp>
 static InputAndOutputIndices
 getInputAndOutputIndices(OpBuilder &b, Location loc, ArrayRef<Value> allIvs,
                          SingleInputPoolingOp op) {
-  auto mapsRange = op.indexing_maps().template getAsRange<AffineMapAttr>();
+  auto mapsRange = op.getIndexingMapsArray();
   auto maps = llvm::to_vector<8>(
       llvm::map_range(mapsRange, [](AffineMapAttr a) { return a.getValue(); }));
   return InputAndOutputIndices{
@@ -143,7 +143,7 @@ namespace mlir {
 namespace linalg {

 bool allIndexingsAreProjectedPermutation(LinalgOp op) {
-  return llvm::all_of(op.getIndexingMaps(), [](AffineMap m) {
+  return llvm::all_of(op.getIndexingMapsArray(), [](AffineMap m) {
     return m.isProjectedPermutation(/*allowZeroInResults=*/true);
   });
 }
@@ -148,7 +148,7 @@ public:
     Location loc = prod.getLoc();
     SmallVector<Value> inputOps = prod.getInputOperands();
     SmallVector<Value> outputOps = op.getOutputOperands();
-    SmallVector<AffineMap> fusedIndexMaps = prod.getIndexingMaps();
+    SmallVector<AffineMap> fusedIndexMaps = prod.getIndexingMapsArray();
     inputOps.push_back(op.getInputOperand(1 - other)->get());
     fusedIndexMaps.push_back(fusedIndexMaps.back()); // mimic other
     // Fuse producer and consumer into a new generic op.
@@ -677,8 +677,8 @@ static LogicalResult verifyOutputShape(
   // types fully define the result vector type. This assumes the affine maps
   // are well-formed, which must have been verified already.
   MLIRContext *ctx = op.getContext();
-  AffineMap lhsMap = op.getIndexingMaps()[0];
-  AffineMap rhsMap = op.getIndexingMaps()[1];
+  AffineMap lhsMap = op.getIndexingMapsArray()[0];
+  AffineMap rhsMap = op.getIndexingMapsArray()[1];
   if (getUnusedDimsBitVector({lhsMap, rhsMap}).any())
     return op.emitOpError(
         "expected all dimensions to be either a LHS or a RHS dimension");
@@ -697,7 +697,7 @@ static LogicalResult verifyOutputShape(
     return op.emitOpError("expected all dimensions to get an extent as "
                           "either a LHS or a RHS dimension");

-  AffineMap resMap = op.getIndexingMaps()[2];
+  AffineMap resMap = op.getIndexingMapsArray()[2];
   auto extentsMap = AffineMap::get(/*dimCount=*/extents.size(),
                                    /*symCount=*/0, extents, ctx);
   // Compose the resMap with the extentsMap, which is a constant map.
@@ -728,14 +728,14 @@ LogicalResult ContractionOp::verify() {
   auto resType = getResultType();

   // Verify that an indexing map was specified for each vector operand.
-  if (getIndexingMaps().size() != 3)
+  if (getIndexingMapsArray().size() != 3)
     return emitOpError("expected an indexing map for each vector operand");

   // Verify that each index map has 'numIterators' inputs, no symbols, and
   // that the number of map outputs equals the rank of its associated
   // vector operand.
   unsigned numIterators = getIteratorTypes().getValue().size();
-  for (const auto &it : llvm::enumerate(getIndexingMaps())) {
+  for (const auto &it : llvm::enumerate(getIndexingMapsArray())) {
     auto index = it.index();
     auto map = it.value();
     if (map.getNumSymbols() != 0)
@@ -833,7 +833,7 @@ void ContractionOp::getIterationBounds(
     SmallVectorImpl<int64_t> &iterationBounds) {
   auto lhsShape = getLhsType().getShape();
   auto resVectorType = getResultType().dyn_cast<VectorType>();
-  SmallVector<AffineMap, 4> indexingMaps(getIndexingMaps());
+  SmallVector<AffineMap, 4> indexingMaps(getIndexingMapsArray());
   SmallVector<int64_t, 2> iterationShape;
   for (const auto &it : llvm::enumerate(getIteratorTypes())) {
     // Search lhs/rhs map results for 'targetExpr'.
@@ -856,9 +856,9 @@ void ContractionOp::getIterationBounds(

 void ContractionOp::getIterationIndexMap(
     std::vector<DenseMap<int64_t, int64_t>> &iterationIndexMap) {
-  unsigned numMaps = getIndexingMaps().size();
+  unsigned numMaps = getIndexingMapsArray().size();
   iterationIndexMap.resize(numMaps);
-  for (const auto &it : llvm::enumerate(getIndexingMaps())) {
+  for (const auto &it : llvm::enumerate(getIndexingMapsArray())) {
     auto index = it.index();
     auto map = it.value();
     for (unsigned i = 0, e = map.getNumResults(); i < e; ++i) {
@@ -869,13 +869,13 @@ void ContractionOp::getIterationIndexMap(
 }

 std::vector<std::pair<int64_t, int64_t>> ContractionOp::getContractingDimMap() {
-  SmallVector<AffineMap, 4> indexingMaps(getIndexingMaps());
+  SmallVector<AffineMap, 4> indexingMaps(getIndexingMapsArray());
   return getDimMap(indexingMaps, getIteratorTypes(),
                    getReductionIteratorTypeName(), getContext());
 }

 std::vector<std::pair<int64_t, int64_t>> ContractionOp::getBatchDimMap() {
-  SmallVector<AffineMap, 4> indexingMaps(getIndexingMaps());
+  SmallVector<AffineMap, 4> indexingMaps(getIndexingMapsArray());
   return getDimMap(indexingMaps, getIteratorTypes(),
                    getParallelIteratorTypeName(), getContext());
 }
@@ -301,7 +301,7 @@ struct CastAwayContractionLeadingOneDim
   // greedily to drop more.
   int64_t dropDim = 1;

-  auto oldIndexingMaps = contractOp.getIndexingMaps();
+  auto oldIndexingMaps = contractOp.getIndexingMapsArray();
   SmallVector<AffineMap> newIndexingMaps;

   auto oldIteratorTypes = contractOp.getIteratorTypes();
@@ -612,8 +612,8 @@ struct ContractOpToElementwise
       return failure();
     ArrayRef<int64_t> lhsShape = contractOp.getLhsType().getShape();
     ArrayRef<int64_t> rhsShape = contractOp.getRhsType().getShape();
-    AffineMap lhsMap = contractOp.getIndexingMaps()[0];
-    AffineMap rhsMap = contractOp.getIndexingMaps()[1];
+    AffineMap lhsMap = contractOp.getIndexingMapsArray()[0];
+    AffineMap rhsMap = contractOp.getIndexingMapsArray()[1];
     SmallVector<int64_t> lhsReductionDims =
         getReductionIndex(lhsMap, contractOp.getIteratorTypes());
     SmallVector<int64_t> rhsReductionDims =
@@ -627,7 +627,7 @@ struct ContractOpToElementwise
       if (rhsShape[dim] != 1)
         return failure();
     }
-    AffineMap accMap = contractOp.getIndexingMaps()[2];
+    AffineMap accMap = contractOp.getIndexingMapsArray()[2];
     unsigned numParallelDims = accMap.getNumResults();
     unsigned numLhsDimToBroadcast =
         numParallelDims - (lhsMap.getNumResults() - lhsReductionDims.size());
@@ -1035,7 +1035,7 @@ struct CombineContractTranspose
   LogicalResult matchAndRewrite(vector::ContractionOp contractOp,
                                 PatternRewriter &rewriter) const override {
     SmallVector<AffineMap, 4> maps =
-        llvm::to_vector<4>(contractOp.getIndexingMaps());
+        llvm::to_vector<4>(contractOp.getIndexingMapsArray());
     Value lhs = contractOp.getLhs();
     Value rhs = contractOp.getRhs();
     size_t index = 0;
@@ -1092,7 +1092,7 @@ struct CombineContractBroadcast
   LogicalResult matchAndRewrite(vector::ContractionOp contractOp,
                                 PatternRewriter &rewriter) const override {
     SmallVector<AffineMap, 4> maps =
-        llvm::to_vector<4>(contractOp.getIndexingMaps());
+        llvm::to_vector<4>(contractOp.getIndexingMapsArray());
     Value lhs = contractOp.getLhs();
     Value rhs = contractOp.getRhs();
     size_t index = 0;
@@ -1385,7 +1385,7 @@ ContractionOpToMatmulOpLowering::matchAndRewrite(vector::ContractionOp op,
   bindDims(rew.getContext(), m, n, k);
   // LHS must be A(m, k) or A(k, m).
   Value lhs = op.getLhs();
-  auto lhsMap = op.getIndexingMaps()[0];
+  auto lhsMap = op.getIndexingMapsArray()[0];
   if (lhsMap == AffineMap::get(3, 0, {k, m}, ctx))
     lhs = rew.create<vector::TransposeOp>(loc, lhs, ArrayRef<int64_t>{1, 0});
   else if (lhsMap != AffineMap::get(3, 0, {m, k}, ctx))
@@ -1393,7 +1393,7 @@ ContractionOpToMatmulOpLowering::matchAndRewrite(vector::ContractionOp op,

   // RHS must be B(k, n) or B(n, k).
   Value rhs = op.getRhs();
-  auto rhsMap = op.getIndexingMaps()[1];
+  auto rhsMap = op.getIndexingMapsArray()[1];
   if (rhsMap == AffineMap::get(3, 0, {n, k}, ctx))
     rhs = rew.create<vector::TransposeOp>(loc, rhs, ArrayRef<int64_t>{1, 0});
   else if (rhsMap != AffineMap::get(3, 0, {k, n}, ctx))
@@ -1423,7 +1423,7 @@ ContractionOpToMatmulOpLowering::matchAndRewrite(vector::ContractionOp op,
       mul);

   // ACC must be C(m, n) or C(n, m).
-  auto accMap = op.getIndexingMaps()[2];
+  auto accMap = op.getIndexingMapsArray()[2];
   if (accMap == AffineMap::get(3, 0, {n, m}, ctx))
     mul = rew.create<vector::TransposeOp>(loc, mul, ArrayRef<int64_t>{1, 0});
   else if (accMap != AffineMap::get(3, 0, {m, n}, ctx))
@@ -1659,7 +1659,7 @@ ContractionOpToDotLowering::matchAndRewrite(vector::ContractionOp op,
   auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); };
   AffineExpr m, n, k;
   bindDims(rewriter.getContext(), m, n, k);
-  SmallVector<AffineMap, 4> maps = op.getIndexingMaps();
+  SmallVector<AffineMap, 4> maps = op.getIndexingMapsArray();
   //
   // In the following we wish to make the reduction dimension innermost so we
   // can load vectors and just fmul + reduce into a scalar.
@@ -1868,7 +1868,7 @@ ContractionOpLowering::lowerParallel(vector::ContractionOp op, int64_t lhsIndex,
   VectorType rhsType = op.getRhsType();
   VectorType resType = op.getResultType().cast<VectorType>();
   // Find the iterator type index and result index.
-  SmallVector<AffineMap, 4> iMap = op.getIndexingMaps();
+  SmallVector<AffineMap, 4> iMap = op.getIndexingMapsArray();
   int64_t iterIndex = -1;
   int64_t dimSize = -1;
   if (lhsIndex >= 0) {
@@ -1939,7 +1939,7 @@ ContractionOpLowering::lowerReduction(vector::ContractionOp op,
   bool isInt = resType.isa<IntegerType>();
   // Use iterator index 0.
   int64_t iterIndex = 0;
-  SmallVector<AffineMap, 4> iMap = op.getIndexingMaps();
+  SmallVector<AffineMap, 4> iMap = op.getIndexingMapsArray();
   Optional<int64_t> lookupLhs = getResultIndex(iMap[0], iterIndex);
   Optional<int64_t> lookupRhs = getResultIndex(iMap[1], iterIndex);
   if (!lookupLhs.has_value())
@@ -320,7 +320,7 @@ struct UnrollContractionPattern

     Location loc = contractOp.getLoc();
     unsigned accIndex = vector::ContractionOp::getAccOperandIndex();
-    AffineMap dstAffineMap = contractOp.getIndexingMaps()[accIndex];
+    AffineMap dstAffineMap = contractOp.getIndexingMapsArray()[accIndex];
     llvm::MapVector<
         SmallVector<int64_t>, Value,
         llvm::DenseMap<SmallVector<int64_t>, unsigned, OffsetMapInfo>>
@@ -347,7 +347,7 @@ struct UnrollContractionPattern
     };

     // Extract the new lhs operand.
-    AffineMap lhsPermutationMap = contractOp.getIndexingMaps()[0];
+    AffineMap lhsPermutationMap = contractOp.getIndexingMapsArray()[0];
     SmallVector<int64_t> lhsOffets =
         applyPermutationMap(lhsPermutationMap, ArrayRef<int64_t>(offsets));
     extractOperand(0, contractOp.getLhs(), lhsPermutationMap, lhsOffets);
@@ -357,7 +357,7 @@ struct UnrollContractionPattern
                      lhsOffets);

     // Extract the new rhs operand.
-    AffineMap rhsPermutationMap = contractOp.getIndexingMaps()[1];
+    AffineMap rhsPermutationMap = contractOp.getIndexingMapsArray()[1];
     SmallVector<int64_t> rhsOffets =
         applyPermutationMap(rhsPermutationMap, ArrayRef<int64_t>(offsets));
     extractOperand(1, contractOp.getRhs(), rhsPermutationMap, rhsOffets);
@@ -366,7 +366,7 @@ struct UnrollContractionPattern
       extractOperand(4, contractOp.getMasks()[1], rhsPermutationMap,
                      rhsOffets);

-    AffineMap accPermutationMap = contractOp.getIndexingMaps()[2];
+    AffineMap accPermutationMap = contractOp.getIndexingMapsArray()[2];
     SmallVector<int64_t> accOffets =
         applyPermutationMap(accPermutationMap, ArrayRef<int64_t>(offsets));
     // If a version of the accumulator has already been computed, use it
@@ -579,7 +579,7 @@ struct ContractExtractPattern : public OpRewritePattern<vector::ExtractMapOp> {
       return failure();
     Location loc = contract.getLoc();
     unsigned accIndex = vector::ContractionOp::getAccOperandIndex();
-    AffineMap affineMap = contract.getIndexingMaps()[accIndex];
+    AffineMap affineMap = contract.getIndexingMapsArray()[accIndex];
     // Create a map of the dimensions distributed based on the acc affine map.
     // Only parallel dimensions are being distributed, reduction dimensions are
     // untouched.
@@ -587,7 +587,7 @@ struct ContractExtractPattern : public OpRewritePattern<vector::ExtractMapOp> {
     for (unsigned i : llvm::seq(unsigned(0), affineMap.getNumResults()))
       map[affineMap.getDimPosition(i)] = extract.getResultType().getDimSize(i);
     SmallVector<Value, 4> extractOperands;
-    for (const auto &it : llvm::enumerate(contract.getIndexingMaps())) {
+    for (const auto &it : llvm::enumerate(contract.getIndexingMapsArray())) {
       // For each operands calculate the new vector type after distribution.
       Value operand = contract->getOperand(it.index());
       auto vecType = operand.getType().cast<VectorType>();
@@ -2750,7 +2750,7 @@ def TestLinalgConvOp :
       return getOperation()->getAttrOfType<mlir::ArrayAttr>("iterator_types");
     }

-    mlir::ArrayAttr indexing_maps() {
+    mlir::ArrayAttr getIndexingMaps() {
       return getOperation()->getAttrOfType<mlir::ArrayAttr>("indexing_maps");
     }

@@ -2808,7 +2808,7 @@ def TestLinalgFillOp :
       return getOperation()->getAttrOfType<mlir::ArrayAttr>("iterator_types");
     }

-    mlir::ArrayAttr indexing_maps() {
+    mlir::ArrayAttr getIndexingMaps() {
       return getOperation()->getAttrOfType<mlir::ArrayAttr>("indexing_maps");
     }

@@ -172,7 +172,7 @@ structured_op: !LinalgStructuredOpConfig
 # IMPL:  cst3 = self.strides().getValues<int64_t>()[1];
 # IMPL-NEXT:  getAffineConstantExpr(cst3, context)

-# IMPL:  Test2Op::indexing_maps()
+# IMPL:  Test2Op::getIndexingMaps()
 # IMPL:  = getSymbolBindings(*this);
 # IMPL:  "affine_map<(d0, d1)[s0, s1, s2, s3] -> (d1 * s2, d0 * s3)>"
 # IMPL:  "affine_map<(d0, d1)[s0, s1, s2, s3] -> (d0, d1)>"
@@ -238,7 +238,7 @@ structured_op: !LinalgStructuredOpConfig
 # IMPL:  Test3Op::iterator_types() {
 # IMPL-NEXT:  int64_t rank = getRank(getOutputOperand(0));

-# IMPL:  Test3Op::indexing_maps() {
+# IMPL:  Test3Op::getIndexingMaps() {
 # IMPL-NEXT:  MLIRContext *context = getContext();
 # IMPL-NEXT:  AffineMap scalarMap = AffineMap::get(getNumParallelLoops(), 0, context);
 # IMPL-NEXT:  AffineMap tensorMap = AffineMap::getMultiDimIdentityMap(
@@ -553,7 +553,7 @@ def {0} : LinalgStructuredBase_Op<"{1}", !listconcat([AttrSizedOperandSegments],
   let extraClassDeclaration = structuredOpsBaseDecls # [{{
     // Auto-generated.
     ArrayAttr iterator_types();
-    ArrayAttr indexing_maps();
+    ArrayAttr getIndexingMaps();
     static void regionBuilder(ImplicitLocOpBuilder &b,
                               Block &block, ArrayRef<NamedAttribute> attrs);
     static std::function<void(ImplicitLocOpBuilder &,
@@ -612,7 +612,7 @@ ArrayAttr {0}::iterator_types() {{
 // {1}: Comma-separated list of dimension variable names.
 // {2}: Statements
 static const char structuredOpIndexingMapsFormat[] = R"FMT(
-ArrayAttr {0}::indexing_maps() {{
+ArrayAttr {0}::getIndexingMaps() {{
   static const char memoizeAttr[] = "linalg.memoized_indexing_maps";
   ArrayAttr cached = getOperation()->getAttrOfType<ArrayAttr>(memoizeAttr);
   if (cached)
@@ -631,7 +631,7 @@ ArrayAttr {0}::indexing_maps() {{
 // The indexing_maps() method for rank polymorphic structured ops. Parameters:
 // {0}: Class name
 static const char rankPolyStructuredOpIndexingMapsFormat[] = R"FMT(
-ArrayAttr {0}::indexing_maps() {{
+ArrayAttr {0}::getIndexingMaps() {{
   MLIRContext *context = getContext();
   AffineMap scalarMap = AffineMap::get(getNumParallelLoops(), 0, context);
   AffineMap tensorMap = AffineMap::getMultiDimIdentityMap(
@@ -819,7 +819,7 @@ generateNamedGenericOpDefns(LinalgOpConfig &opConfig,
     os << llvm::formatv(rankPolyStructuredOpIteratorTypesFormat, className);
   }

-  // Generating the indexing_maps() method.
+  // Generating the getIndexingMaps() method.
   if (auto &staticMaps =
           opConfig.structuredOp->indexingMaps.staticIndexingMaps) {
     if (staticMaps->empty())
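For illustration, here is roughly what the renamed structuredOpIndexingMapsFormat expands to for a concrete op; the class name MatmulOp is assumed, and the map-building statements spliced in through {2} are elided:

// Sketch only: {0} becomes the op class name; the elided statements build
// the ArrayAttr of affine_map attributes using getSymbolBindings(*this).
ArrayAttr MatmulOp::getIndexingMaps() {
  static const char memoizeAttr[] = "linalg.memoized_indexing_maps";
  ArrayAttr cached = getOperation()->getAttrOfType<ArrayAttr>(memoizeAttr);
  if (cached)
    return cached;
  // ... construct `cached` from the op's symbol bindings ...
  getOperation()->setAttr(memoizeAttr, cached);
  return cached;
}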