// llvm-project/mlir/lib/IR/AffineMap.cpp
//===- AffineMap.cpp - MLIR Affine Map Classes ----------------------------===//
//
// Copyright 2019 The MLIR Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
#include "mlir/IR/AffineMap.h"
#include "AffineMapDetail.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/StandardTypes.h"
#include "mlir/Support/Functional.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Support/MathExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
using namespace mlir;
namespace {
// AffineExprConstantFolder evaluates an affine expression using constant
// operands passed in 'operandConsts'. Returns an IntegerAttr attribute
// representing the constant value of the affine expression evaluated on
// constant 'operandConsts', or nullptr if it can't be folded.
// AffineExprConstantFolder evaluates an affine expression using constant
// operands passed in 'operandConsts'. Returns an IntegerAttr attribute
// representing the constant value of the affine expression evaluated on
// constant 'operandConsts', or nullptr if it can't be folded.
class AffineExprConstantFolder {
public:
AffineExprConstantFolder(unsigned numDims, ArrayRef<Attribute> operandConsts)
: numDims(numDims), operandConsts(operandConsts) {}
/// Attempt to constant fold the specified affine expr, or return null on
/// failure.
IntegerAttr constantFold(AffineExpr expr) {
if (auto result = constantFoldImpl(expr))
return IntegerAttr::get(IndexType::get(expr.getContext()), *result);
return nullptr;
}
private:
// Recursively folds 'expr', returning its value or None when any operand
// cannot be resolved to an integer constant.
llvm::Optional<int64_t> constantFoldImpl(AffineExpr expr) {
switch (expr.getKind()) {
case AffineExprKind::Add:
return constantFoldBinExpr(
expr, [](int64_t lhs, int64_t rhs) { return lhs + rhs; });
case AffineExprKind::Mul:
return constantFoldBinExpr(
expr, [](int64_t lhs, int64_t rhs) { return lhs * rhs; });
case AffineExprKind::Mod:
// mod/floorDiv/ceilDiv require a positive divisor; use the guarded
// folder so a non-positive folded divisor bails out instead of hitting
// undefined behavior.
return constantFoldDivExpr(
expr, [](int64_t lhs, int64_t rhs) { return mod(lhs, rhs); });
case AffineExprKind::FloorDiv:
return constantFoldDivExpr(
expr, [](int64_t lhs, int64_t rhs) { return floorDiv(lhs, rhs); });
case AffineExprKind::CeilDiv:
return constantFoldDivExpr(
expr, [](int64_t lhs, int64_t rhs) { return ceilDiv(lhs, rhs); });
case AffineExprKind::Constant:
return expr.cast<AffineConstantExpr>().getValue();
case AffineExprKind::DimId:
// Dim operands occupy positions [0, numDims) in 'operandConsts'.
if (auto attr = operandConsts[expr.cast<AffineDimExpr>().getPosition()]
.dyn_cast_or_null<IntegerAttr>())
return attr.getInt();
return llvm::None;
case AffineExprKind::SymbolId:
// Symbol operands follow the dim operands in 'operandConsts'.
if (auto attr = operandConsts[numDims +
expr.cast<AffineSymbolExpr>().getPosition()]
.dyn_cast_or_null<IntegerAttr>())
return attr.getInt();
return llvm::None;
}
llvm_unreachable("Unknown AffineExpr");
}
// TODO: Change these to operate on APInts too.
// Folds a binary expression by folding both operands and applying 'op'.
llvm::Optional<int64_t> constantFoldBinExpr(AffineExpr expr,
int64_t (*op)(int64_t, int64_t)) {
auto binOpExpr = expr.cast<AffineBinaryOpExpr>();
if (auto lhs = constantFoldImpl(binOpExpr.getLHS()))
if (auto rhs = constantFoldImpl(binOpExpr.getRHS()))
return op(*lhs, *rhs);
return llvm::None;
}
// Like constantFoldBinExpr, but for mod/floorDiv/ceilDiv, whose semantics
// (mlir::mod/floorDiv/ceilDiv) are only defined for a positive RHS. Returns
// None instead of evaluating when the folded divisor is <= 0.
llvm::Optional<int64_t> constantFoldDivExpr(AffineExpr expr,
int64_t (*op)(int64_t, int64_t)) {
auto binOpExpr = expr.cast<AffineBinaryOpExpr>();
auto lhs = constantFoldImpl(binOpExpr.getLHS());
auto rhs = constantFoldImpl(binOpExpr.getRHS());
if (!lhs || !rhs || *rhs < 1)
return llvm::None;
return op(*lhs, *rhs);
}
// The number of dimension operands in AffineMap containing this expression.
unsigned numDims;
// The constant valued operands used to evaluate this AffineExpr.
ArrayRef<Attribute> operandConsts;
};
} // end anonymous namespace
/// Returns a map with no inputs and a single constant result, i.e.
/// ()[] -> (val).
AffineMap AffineMap::getConstantMap(int64_t val, MLIRContext *context) {
  auto constExpr = getAffineConstantExpr(val, context);
  return get(/*dimCount=*/0, /*symbolCount=*/0, {constExpr});
}
/// Returns the numDims-dimensional identity map
/// (d0, ..., dn-1) -> (d0, ..., dn-1).
AffineMap AffineMap::getMultiDimIdentityMap(unsigned numDims,
                                            MLIRContext *context) {
  SmallVector<AffineExpr, 4> identityExprs;
  identityExprs.reserve(numDims);
  for (unsigned d = 0; d != numDims; ++d)
    identityExprs.push_back(mlir::getAffineDimExpr(d, context));
  return get(/*dimCount=*/numDims, /*symbolCount=*/0, identityExprs);
}
MLIRContext *AffineMap::getContext() const { return map->context; }
/// Returns true if this map is the multi-dimensional identity: it has as many
/// results as dims, and result i is exactly dim i.
bool AffineMap::isIdentity() const {
  if (getNumDims() != getNumResults())
    return false;
  unsigned pos = 0;
  for (auto result : getResults()) {
    auto dimExpr = result.dyn_cast<AffineDimExpr>();
    if (!dimExpr || dimExpr.getPosition() != pos)
      return false;
    ++pos;
  }
  return true;
}
/// Returns true if the map has no dims, no symbols and no results.
bool AffineMap::isEmpty() const {
  if (getNumResults() != 0)
    return false;
  return getNumDims() == 0 && getNumSymbols() == 0;
}
/// Returns true if the map has exactly one result and that result is an
/// AffineConstantExpr.
bool AffineMap::isSingleConstant() const {
  if (getNumResults() != 1)
    return false;
  return getResult(0).isa<AffineConstantExpr>();
}
/// Returns the value of the single constant result; asserts that
/// isSingleConstant() holds.
int64_t AffineMap::getSingleConstantResult() const {
  assert(isSingleConstant() && "map must have a single constant result");
  auto constExpr = getResult(0).cast<AffineConstantExpr>();
  return constExpr.getValue();
}
/// Returns the number of dimension inputs of this map.
unsigned AffineMap::getNumDims() const {
assert(map && "uninitialized map storage");
return map->numDims;
}
/// Returns the number of symbol inputs of this map.
unsigned AffineMap::getNumSymbols() const {
assert(map && "uninitialized map storage");
return map->numSymbols;
}
/// Returns the number of result expressions of this map.
unsigned AffineMap::getNumResults() const {
assert(map && "uninitialized map storage");
return map->results.size();
}
/// Returns the total number of inputs (dims followed by symbols).
unsigned AffineMap::getNumInputs() const {
assert(map && "uninitialized map storage");
return map->numDims + map->numSymbols;
}
/// Returns all result expressions of this map.
ArrayRef<AffineExpr> AffineMap::getResults() const {
assert(map && "uninitialized map storage");
return map->results;
}
/// Returns the result expression at position 'idx'.
AffineExpr AffineMap::getResult(unsigned idx) const {
assert(map && "uninitialized map storage");
return map->results[idx];
}
[RFC][MLIR] Use AffineExprRef in place of AffineExpr* in IR This CL starts by replacing AffineExpr* with value-type AffineExprRef in a few places in the IR. By a domino effect that is pretty telling of the inconsistencies in the codebase, const is removed where it makes sense. The rationale is that the decision was concisously made that unique'd types have pointer semantics without const specifier. This is fine but we should be consistent. In the end, the only logical invariant is that there should never be such a thing as a const AffineExpr*, const AffineMap* or const IntegerSet* in our codebase. This CL takes a number of shortcuts to killing const with fire, in particular forcing const AffineExprRef to return the underlying non-const AffineExpr*. This will be removed once AffineExpr* has disappeared in containers but for now such shortcuts allow a bit of sanity in this long quest for cleanups. The **only** places where const AffineExpr*, const AffineMap* or const IntegerSet* may still appear is by transitive needs from containers, comparison operators etc. There is still one major thing remaining here: figure out why cast/dyn_cast return me a const AffineXXX*, which in turn requires a bunch of ugly const_casts. I suspect this is due to the classof taking const AffineXXXExpr*. I wonder whether this is a side effect of 1., if it is coming from llvm itself (I'd doubt it) or something else (clattner@?) In light of this, the whole discussion about const makes total sense to me now and I would systematically apply the rule that in the end, we should never have any const XXX in our codebase for unique'd types (assuming we can remove them all in containers and no additional constness constraint is added on us from the outside world). PiperOrigin-RevId: 215811554
2018-10-05 06:10:33 +08:00
/// Folds the results of the application of an affine map on the provided
/// operands to a constant if possible. Returns false if the folding happens,
/// true otherwise.
LogicalResult
AffineMap::constantFold(ArrayRef<Attribute> operandConstants,
SmallVectorImpl<Attribute> &results) const {
assert(getNumInputs() == operandConstants.size());
// Fold each of the result expressions.
AffineExprConstantFolder exprFolder(getNumDims(), operandConstants);
// Constant fold each AffineExpr in AffineMap and add to 'results'.
for (auto expr : getResults()) {
auto folded = exprFolder.constantFold(expr);
// If we didn't fold to a constant, then folding fails.
if (!folded)
return failure();
results.push_back(folded);
}
assert(results.size() == getNumResults() &&
"constant folding produced the wrong number of results");
return success();
}
/// Walk all of the AffineExpr's in this mapping. Each node in an expression
/// tree is visited in postorder.
void AffineMap::walkExprs(std::function<void(AffineExpr)> callback) const {
for (auto expr : getResults())
expr.walk(callback);
}
/// Substitutes every use of a dimension (dim#i -> dimReplacements[i]) and of a
/// symbol (sym#i -> symReplacements[i]) in each result expression. Since this
/// can eliminate dims and symbols, the caller supplies the dim/symbol counts
/// of the resulting map; the number of results is preserved.
AffineMap AffineMap::replaceDimsAndSymbols(ArrayRef<AffineExpr> dimReplacements,
                                           ArrayRef<AffineExpr> symReplacements,
                                           unsigned numResultDims,
                                           unsigned numResultSyms) {
  SmallVector<AffineExpr, 8> newResults;
  newResults.reserve(getNumResults());
  for (AffineExpr result : getResults()) {
    AffineExpr substituted =
        result.replaceDimsAndSymbols(dimReplacements, symReplacements);
    newResults.push_back(substituted);
  }
  return get(numResultDims, numResultSyms, newResults);
}
/// Composes this map with 'map': result(x) = this(map(x)). The composed map
/// takes `map`'s dims as inputs; its symbols are this map's symbols followed
/// by `map`'s symbols (shifted past them).
AffineMap AffineMap::compose(AffineMap map) {
  assert(getNumDims() == map.getNumResults() && "Number of results mismatch");
  // Prepare `map` by concatenating the symbols and rewriting its exprs.
  unsigned numDims = map.getNumDims();
  unsigned numSymbolsThisMap = getNumSymbols();
  unsigned numSymbols = numSymbolsThisMap + map.getNumSymbols();
  // `map`'s dims keep their positions.
  SmallVector<AffineExpr, 8> newDims(numDims);
  for (unsigned idx = 0; idx < numDims; ++idx) {
    newDims[idx] = getAffineDimExpr(idx, getContext());
  }
  // Replacement list for `map`'s symbols: sym#i becomes
  // sym#(i + numSymbolsThisMap). Sized to map.getNumSymbols() — only that many
  // entries are ever written (the previous sizing to `numSymbols` left
  // trailing null exprs).
  SmallVector<AffineExpr, 8> newSymbols(map.getNumSymbols());
  for (unsigned idx = numSymbolsThisMap; idx < numSymbols; ++idx) {
    newSymbols[idx - numSymbolsThisMap] =
        getAffineSymbolExpr(idx, getContext());
  }
  auto newMap =
      map.replaceDimsAndSymbols(newDims, newSymbols, numDims, numSymbols);
  // Compose each of this map's results with the rewritten `map`.
  SmallVector<AffineExpr, 8> exprs;
  exprs.reserve(getResults().size());
  for (auto expr : getResults())
    exprs.push_back(expr.compose(newMap));
  return AffineMap::get(numDims, numSymbols, exprs);
}
bool AffineMap::isProjectedPermutation() {
if (getNumSymbols() > 0)
return false;
SmallVector<bool, 8> seen(getNumInputs(), false);
for (auto expr : getResults()) {
if (auto dim = expr.dyn_cast<AffineDimExpr>()) {
if (seen[dim.getPosition()])
return false;
seen[dim.getPosition()] = true;
continue;
}
return false;
}
return true;
}
/// Returns true if the map permutes its dims: as many results as dims, each a
/// distinct AffineDimExpr.
bool AffineMap::isPermutation() {
  return getNumDims() == getNumResults() && isProjectedPermutation();
}
/// Builds a new map from the results at positions 'resultPos', keeping this
/// map's dim and symbol counts.
AffineMap AffineMap::getSubMap(ArrayRef<unsigned> resultPos) {
  SmallVector<AffineExpr, 4> subExprs;
  subExprs.reserve(resultPos.size());
  for (unsigned pos : resultPos)
    subExprs.push_back(getResult(pos));
  return AffineMap::get(getNumDims(), getNumSymbols(), subExprs);
}
/// Simplifies each result expression of 'map' and returns the simplified map
/// with the same dim/symbol counts.
AffineMap mlir::simplifyAffineMap(AffineMap map) {
  // Hoist the loop-invariant counts and pre-size the result list.
  unsigned numDims = map.getNumDims();
  unsigned numSymbols = map.getNumSymbols();
  SmallVector<AffineExpr, 8> exprs;
  exprs.reserve(map.getNumResults());
  for (auto e : map.getResults())
    exprs.push_back(simplifyAffineExpr(e, numDims, numSymbols));
  return AffineMap::get(numDims, numSymbols, exprs);
}
AffineMap mlir::inversePermutation(AffineMap map) {
if (!map)
return map;
assert(map.getNumSymbols() == 0 && "expected map without symbols");
SmallVector<AffineExpr, 4> exprs(map.getNumDims());
for (auto en : llvm::enumerate(map.getResults())) {
auto expr = en.value();
// Skip non-permutations.
if (auto d = expr.dyn_cast<AffineDimExpr>()) {
if (exprs[d.getPosition()])
continue;
exprs[d.getPosition()] = getAffineDimExpr(en.index(), d.getContext());
}
}
SmallVector<AffineExpr, 4> seenExprs;
seenExprs.reserve(map.getNumDims());
for (auto expr : exprs)
if (expr)
seenExprs.push_back(expr);
Add a generic Linalg op This CL introduces a linalg.generic op to represent generic tensor contraction operations on views. A linalg.generic operation requires a numbers of attributes that are sufficient to emit the computation in scalar form as well as compute the appropriate subviews to enable tiling and fusion. These attributes are very similar to the attributes for existing operations such as linalg.matmul etc and existing operations can be implemented with the generic form. In the future, most existing operations can be implemented using the generic form. This CL starts by splitting out most of the functionality of the linalg::NInputsAndOutputs trait into a ViewTrait that queries the per-instance properties of the op. This allows using the attribute informations. This exposes an ordering of verifiers issue where ViewTrait::verify uses attributes but the verifiers for those attributes have not been run. The desired behavior would be for the verifiers of the attributes specified in the builder to execute first but it is not the case atm. As a consequence, to emit proper error messages and avoid crashing, some of the linalg.generic methods are defensive as such: ``` unsigned getNumInputs() { // This is redundant with the `n_views` attribute verifier but ordering of verifiers // may exhibit cases where we crash instead of emitting an error message. if (!getAttr("n_views") || n_views().getValue().size() != 2) return 0; ``` In pretty-printed form, the specific attributes required for linalg.generic are factored out in an independent dictionary named "_". When parsing its content is flattened and the "_name" is dropped. This allows using aliasing for reducing boilerplate at each linalg.generic invocation while benefiting from the Tablegen'd verifier form for each named attribute in the dictionary. 
For instance, implementing linalg.matmul in terms of linalg.generic resembles: ``` func @mac(%a: f32, %b: f32, %c: f32) -> f32 { %d = mulf %a, %b: f32 %e = addf %c, %d: f32 return %e: f32 } #matmul_accesses = [ (m, n, k) -> (m, k), (m, n, k) -> (k, n), (m, n, k) -> (m, n) ] #matmul_trait = { doc = "C(m, n) += A(m, k) * B(k, n)", fun = @mac, indexing_maps = #matmul_accesses, library_call = "linalg_matmul", n_views = [2, 1], n_loop_types = [2, 1, 0] } ``` And can be used in multiple places as: ``` linalg.generic #matmul_trait %A, %B, %C [other-attributes] : !linalg.view<?x?xf32>, !linalg.view<?x?xf32>, !linalg.view<?x?xf32> ``` In the future it would be great to have a mechanism to alias / register a new linalg.op as a pair of linalg.generic, #trait. Also, note that with one could theoretically only specify the `doc` string and parse all the attributes from it. PiperOrigin-RevId: 261338740
2019-08-03 00:53:08 +08:00
if (seenExprs.size() != map.getNumInputs())
return AffineMap();
return AffineMap::get(map.getNumResults(), 0, seenExprs);
}
/// Concatenates the result lists of all non-null maps in 'maps'. Each map must
/// be symbol-free; the concatenated map uses the maximum dim count seen.
/// Returns a null AffineMap when that dim count is zero.
AffineMap mlir::concatAffineMaps(ArrayRef<AffineMap> maps) {
  // Pre-count the results so the output vector is allocated once.
  unsigned totalResults = 0;
  for (auto m : maps)
    if (m)
      totalResults += m.getNumResults();
  llvm::SmallVector<AffineExpr, 8> concatenated;
  concatenated.reserve(totalResults);
  unsigned maxDims = 0;
  for (auto m : maps) {
    if (!m)
      continue;
    assert(m.getNumSymbols() == 0 && "expected map without symbols");
    auto res = m.getResults();
    concatenated.append(res.begin(), res.end());
    maxDims = std::max(m.getNumDims(), maxDims);
  }
  if (maxDims == 0)
    return AffineMap();
  return AffineMap::get(maxDims, 0, concatenated);
}