Fix misc bugs / TODOs / other improvements to analysis utils

- fix for getConstantBoundOnDimSize: floordiv -> ceildiv for extent
- make getConstantBoundOnDimSize also return the identifier upper bound
- fix unionBoundingBox to correctly use the divisor and upper bound identified by
  getConstantBoundOnDimSize
- deal with loop step correctly in addAffineForOpDomain (covers most cases now)
- fully compose bound map / operands and simplify/canonicalize before adding
  dim/symbol to FlatAffineConstraints; fixes false positives in -memref-bound-check; add
  test case there
- expose mlir::isTopLevelSymbol from AffineOps

PiperOrigin-RevId: 238050395
Uday Bondhugula, 2019-03-12 10:52:09 -07:00 (committed by jpienaar)
commit 9f2781e8dd, parent 7eee76b84c
9 changed files with 167 additions and 64 deletions

@@ -33,6 +33,10 @@ class AffineValueMap;
 class FlatAffineConstraints;
 class FuncBuilder;
 
+/// A utility function to check if a value is defined at the top level of a
+/// function. A value defined at the top level is always a valid symbol.
+bool isTopLevelSymbol(const Value *value);
+
 class AffineOpsDialect : public Dialect {
 public:
   AffineOpsDialect(MLIRContext *context);

@@ -464,11 +464,12 @@
   void addId(IdKind kind, unsigned pos, Value *id = nullptr);
 
   /// Add the specified values as a dim or symbol id depending on its nature, if
-  /// it already doesn't exist in the system. The identifier is added to the end
-  /// of the existing dims or symbols. Additional information on the identifier
-  /// is extracted from the IR (if it's a loop IV or a symbol, and added to the
-  /// constraint system).
-  void addDimOrSymbolId(Value *id);
+  /// it already doesn't exist in the system. `id' has to be either a terminal
+  /// symbol or a loop IV, i.e., it cannot be the result affine.apply of any
+  /// symbols or loop IVs. The identifier is added to the end of the existing
+  /// dims or symbols. Additional information on the identifier is extracted
+  /// from the IR and added to the constraint system.
+  void addInductionVarOrTerminalSymbol(Value *id);
 
   /// Composes the affine value map with this FlatAffineConstrains, adding the
   /// results of the map as dimensions at the front [0, vMap->getNumResults())
@@ -619,7 +620,8 @@
   Optional<int64_t>
   getConstantBoundOnDimSize(unsigned pos,
                             SmallVectorImpl<int64_t> *lb = nullptr,
-                            int64_t *lbFloorDivisor = nullptr) const;
+                            int64_t *lbFloorDivisor = nullptr,
+                            SmallVectorImpl<int64_t> *ub = nullptr) const;
 
   /// Returns the constant lower bound for the pos^th identifier if there is
   /// one; None otherwise.
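
A note on the new 'ub' out-parameter, as it can be read off the implementation changes further below: it mirrors 'lb'. When a constant extent is found, the symbolic upper bound is returned in the same layout (symbol coefficients followed by the constant term), and both bounds share the divisor reported through 'lbFloorDivisor'. For the example in the implementation's comment, s0 - 7 <= 8*j <= s0, both lb and ub come out as s0 with divisor 8 (i.e., j = floor(s0 / 8)), and the returned extent is 1. unionBoundingBox relies on this pairing.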

@@ -40,8 +40,8 @@ AffineOpsDialect::AffineOpsDialect(MLIRContext *context)
 }
 
 /// A utility function to check if a value is defined at the top level of a
-/// function.
-static bool isDefinedAtTopLevel(const Value *value) {
+/// function. A value defined at the top level is always a valid symbol.
+bool mlir::isTopLevelSymbol(const Value *value) {
   if (auto *arg = dyn_cast<BlockArgument>(value))
     return arg->getOwner()->getParent()->getContainingFunction();
   return value->getDefiningInst()->getParentInst() == nullptr;
@@ -65,7 +65,7 @@ bool mlir::isValidDim(const Value *value) {
     // The dim op is okay if its operand memref/tensor is defined at the top
     // level.
     if (auto dimOp = inst->dyn_cast<DimOp>())
-      return isDefinedAtTopLevel(dimOp->getOperand());
+      return isTopLevelSymbol(dimOp->getOperand());
     return false;
   }
   // This value is a block argument (which also includes 'for' loop IVs).
@@ -90,7 +90,7 @@ bool mlir::isValidSymbol(const Value *value) {
     // The dim op is okay if its operand memref/tensor is defined at the top
     // level.
     if (auto dimOp = inst->dyn_cast<DimOp>())
-      return isDefinedAtTopLevel(dimOp->getOperand());
+      return isTopLevelSymbol(dimOp->getOperand());
     return false;
   }
   // Otherwise, the only valid symbol is a top level block argument.

@@ -675,6 +675,7 @@ void MemRefAccess::getAccessMap(AffineValueMap *accessMap) const {
                         memref->getType().getContext());
   SmallVector<Value *, 8> operands(indices.begin(), indices.end());
   fullyComposeAffineMapAndOperands(&map, &operands);
+  map = simplifyAffineMap(map);
   canonicalizeMapAndOperands(&map, &operands);
   accessMap->reset(map, operands);
 }

@@ -709,24 +709,27 @@ void FlatAffineConstraints::convertLoopIVSymbolsToDims() {
   }
 }
 
-void FlatAffineConstraints::addDimOrSymbolId(Value *id) {
+void FlatAffineConstraints::addInductionVarOrTerminalSymbol(Value *id) {
   if (containsId(*id))
     return;
-  if (isValidSymbol(id)) {
-    addSymbolId(getNumSymbolIds(), id);
-    // Check if the symbol is a constant.
-    if (auto *opInst = id->getDefiningInst()) {
-      if (auto constOp = opInst->dyn_cast<ConstantIndexOp>()) {
-        setIdToConstant(*id, constOp->getValue());
-      }
-    }
-  } else {
-    addDimId(getNumDimIds(), id);
-    if (auto loop = getForInductionVarOwner(id)) {
-      // Outer loop IVs could be used in forOp's bounds.
-      if (failed(this->addAffineForOpDomain(loop)))
-        LLVM_DEBUG(loop->emitWarning(
-            "failed to add domain info to constraint system"));
+
+  // Caller is expected to fully compose map/operands if necessary.
+  assert((isTopLevelSymbol(id) || isForInductionVar(id)) &&
+         "non-terminal symbol / loop IV expected");
+  // Outer loop IVs could be used in forOp's bounds.
+  if (auto loop = getForInductionVarOwner(id)) {
+    addDimId(getNumDimIds(), id);
+    if (failed(this->addAffineForOpDomain(loop)))
+      LLVM_DEBUG(
+          loop->emitWarning("failed to add domain info to constraint system"));
+    return;
+  }
+  // Add top level symbol.
+  addSymbolId(getNumSymbolIds(), id);
+  // Check if the symbol is a constant.
+  if (auto *opInst = id->getDefiningInst()) {
+    if (auto constOp = opInst->dyn_cast<ConstantIndexOp>()) {
+      setIdToConstant(*id, constOp->getValue());
     }
   }
 }
@@ -740,11 +743,30 @@ FlatAffineConstraints::addAffineForOpDomain(ConstOpPointer<AffineForOp> forOp) {
     return failure();
   }
 
-  if (forOp->getStep() != 1)
-    LLVM_DEBUG(
-        forOp->emitWarning("Domain conservative: non-unit stride not handled"));
   int64_t step = forOp->getStep();
+  if (step != 1) {
+    if (!forOp->hasConstantLowerBound())
+      forOp->emitWarning("domain conservatively approximated");
+    else {
+      // Add constraints for the stride.
+      // (iv - lb) % step = 0 can be written as:
+      // (iv - lb) - step * q = 0 where q = (iv - lb) / step.
+      // Add local variable 'q' and add the above equality.
+      // The first constraint is q = (iv - lb) floordiv step
+      SmallVector<int64_t, 8> dividend(getNumCols(), 0);
+      int64_t lb = forOp->getConstantLowerBound();
+      dividend[pos] = 1;
+      dividend.back() -= lb;
+      addLocalFloorDiv(dividend, step);
+      // Second constraint: (iv - lb) - step * q = 0.
+      SmallVector<int64_t, 8> eq(getNumCols(), 0);
+      eq[pos] = 1;
+      eq.back() -= lb;
+      // For the local var just added above.
+      eq[getNumCols() - 2] = -step;
+      addEquality(eq);
+    }
+  }
 
   if (forOp->hasConstantLowerBound()) {
     addConstantLowerBound(pos, forOp->getConstantLowerBound());
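
A worked instance of the stride constraints added above, using the stepped loop from the new test case further down (for %i8 = 0 to 9 step 3): with lb = 0 and step = 3, a local variable q = (%i8 - 0) floordiv 3 is introduced along with the equality %i8 - 3*q = 0. Combined with the bounds 0 <= %i8 <= 8 added just below (the constant upper bound now uses ub - 1 = 8 rather than ub - step), the feasible values of %i8 are exactly {0, 3, 6}. The old ub - step form is also unsafe in general: for a loop from 0 to 10 step 3 the IV reaches 9, while 10 - 3 = 7 would exclude it.
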
@@ -760,7 +782,7 @@ FlatAffineConstraints::addAffineForOpDomain(ConstOpPointer<AffineForOp> forOp) {
   }
 
   if (forOp->hasConstantUpperBound()) {
-    addConstantUpperBound(pos, forOp->getConstantUpperBound() - step);
+    addConstantUpperBound(pos, forOp->getConstantUpperBound() - 1);
     return success();
   }
   // Non-constant upper bound case.
@@ -1617,8 +1639,8 @@ void FlatAffineConstraints::getSliceBounds(unsigned num, MLIRContext *context,
 LogicalResult
 FlatAffineConstraints::addLowerOrUpperBound(unsigned pos, AffineMap boundMap,
-                                            ArrayRef<Value *> operands, bool eq,
-                                            bool lower) {
+                                            ArrayRef<Value *> boundOperands,
+                                            bool eq, bool lower) {
   assert(pos < getNumDimAndSymbolIds() && "invalid position");
   // Equality follows the logic of lower bound except that we add an equality
   // instead of an inequality.
@@ -1626,12 +1648,19 @@ FlatAffineConstraints::addLowerOrUpperBound(unsigned pos, AffineMap boundMap,
   if (eq)
     lower = true;
 
+  // Fully commpose map and operands; canonicalize and simplify so that we
+  // transitively get to terminal symbols or loop IVs.
+  auto map = boundMap;
+  SmallVector<Value *, 4> operands(boundOperands.begin(), boundOperands.end());
+  fullyComposeAffineMapAndOperands(&map, &operands);
+  map = simplifyAffineMap(map);
+  canonicalizeMapAndOperands(&map, &operands);
   for (auto *operand : operands)
-    addDimOrSymbolId(operand);
+    addInductionVarOrTerminalSymbol(operand);
 
   FlatAffineConstraints localVarCst;
   std::vector<SmallVector<int64_t, 8>> flatExprs;
-  if (failed(getFlattenedAffineExprs(boundMap, &flatExprs, &localVarCst))) {
+  if (failed(getFlattenedAffineExprs(map, &flatExprs, &localVarCst))) {
     LLVM_DEBUG(llvm::dbgs() << "semi-affine expressions not yet supported\n");
     return failure();
   }
@@ -1671,7 +1700,7 @@ FlatAffineConstraints::addLowerOrUpperBound(unsigned pos, AffineMap boundMap,
   SmallVector<int64_t, 4> ineq(getNumCols(), 0);
   ineq[pos] = lower ? 1 : -1;
   // Dims and symbols.
-  for (unsigned j = 0, e = boundMap.getNumInputs(); j < e; j++) {
+  for (unsigned j = 0, e = map.getNumInputs(); j < e; j++) {
     ineq[positions[j]] = lower ? -flatExpr[j] : flatExpr[j];
   }
   // Copy over the local id coefficients.
@@ -1961,7 +1990,8 @@ void FlatAffineConstraints::constantFoldIdRange(unsigned pos, unsigned num) {
 // s0 - 7 <= 8*j <= s0 returns 1 with lb = s0, lbDivisor = 8 (since lb =
 // ceil(s0 - 7 / 8) = floor(s0 / 8)).
 Optional<int64_t> FlatAffineConstraints::getConstantBoundOnDimSize(
-    unsigned pos, SmallVectorImpl<int64_t> *lb, int64_t *lbFloorDivisor) const {
+    unsigned pos, SmallVectorImpl<int64_t> *lb, int64_t *lbFloorDivisor,
+    SmallVectorImpl<int64_t> *ub) const {
   assert(pos < getNumDimIds() && "Invalid identifier position");
   assert(getNumLocalIds() == 0);
@@ -1977,12 +2007,17 @@ Optional<int64_t> FlatAffineConstraints::getConstantBoundOnDimSize(
     if (lb) {
       // Set lb to the symbolic value.
       lb->resize(getNumSymbolIds() + 1);
+      if (ub)
+        ub->resize(getNumSymbolIds() + 1);
       for (unsigned c = 0, f = getNumSymbolIds() + 1; c < f; c++) {
         int64_t v = atEq(eqRow, pos);
         // atEq(eqRow, pos) is either -1 or 1.
         assert(v * v == 1);
         (*lb)[c] = v < 0 ? atEq(eqRow, getNumDimIds() + c) / -v
                          : -atEq(eqRow, getNumDimIds() + c) / v;
+        // Since this is an equality, ub = lb.
+        if (ub)
+          (*ub)[c] = (*lb)[c];
       }
       assert(lbFloorDivisor &&
              "both lb and divisor or none should be provided");
@@ -2028,7 +2063,7 @@ Optional<int64_t> FlatAffineConstraints::getConstantBoundOnDimSize(
   // powerful. Not needed for hyper-rectangular iteration spaces.
 
   Optional<int64_t> minDiff = None;
-  unsigned minLbPosition;
+  unsigned minLbPosition, minUbPosition;
   for (auto ubPos : ubIndices) {
     for (auto lbPos : lbIndices) {
       // Look for a lower bound and an upper bound that only differ by a
@@ -2044,28 +2079,38 @@ Optional<int64_t> FlatAffineConstraints::getConstantBoundOnDimSize(
       }
       if (j < getNumCols() - 1)
         continue;
-      int64_t diff = floorDiv(atIneq(ubPos, getNumCols() - 1) +
-                                  atIneq(lbPos, getNumCols() - 1) + 1,
-                              atIneq(lbPos, pos));
+      int64_t diff = ceilDiv(atIneq(ubPos, getNumCols() - 1) +
+                                 atIneq(lbPos, getNumCols() - 1) + 1,
+                             atIneq(lbPos, pos));
       if (minDiff == None || diff < minDiff) {
         minDiff = diff;
         minLbPosition = lbPos;
+        minUbPosition = ubPos;
       }
     }
   }
   if (lb && minDiff.hasValue()) {
     // Set lb to the symbolic lower bound.
     lb->resize(getNumSymbolIds() + 1);
+    if (ub)
+      ub->resize(getNumSymbolIds() + 1);
     // The lower bound is the ceildiv of the lb constraint over the coefficient
     // of the variable at 'pos'. We express the ceildiv equivalently as a floor
     // for uniformity. For eg., if the lower bound constraint was: 32*d0 - N +
     // 31 >= 0, the lower bound for d0 is ceil(N - 31, 32), i.e., floor(N, 32).
     *lbFloorDivisor = atIneq(minLbPosition, pos);
+    assert(*lbFloorDivisor == -atIneq(minUbPosition, pos));
     for (unsigned c = 0, e = getNumSymbolIds() + 1; c < e; c++) {
       (*lb)[c] = -atIneq(minLbPosition, getNumDimIds() + c);
     }
-    // ceildiv (val / d) = floordiv (val + d - 1 / d); hence, the addition of
-    // 'atIneq(minLbPosition, pos) - 1' to the constant term.
+    if (ub) {
+      for (unsigned c = 0, e = getNumSymbolIds() + 1; c < e; c++)
+        (*ub)[c] = atIneq(minUbPosition, getNumDimIds() + c);
+    }
+    // The lower bound leads to a ceildiv while the upper bound is a floordiv
+    // whenever the cofficient at pos != 1. ceildiv (val / d) = floordiv (val +
+    // d - 1 / d); hence, the addition of 'atIneq(minLbPosition, pos) - 1' to
+    // the constant term for the lower bound.
     (*lb)[getNumSymbolIds()] += atIneq(minLbPosition, pos) - 1;
   }
   return minDiff;
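
The floorDiv -> ceilDiv switch above is the extent fix from the first bullet of the commit message. For a pair of constraints a*d + cLb >= 0 and -a*d + cUb >= 0, the exact number of integer values of d is floor(cUb/a) + floor(cLb/a) + 1; ceilDiv(cUb + cLb + 1, a) is always an upper bound on that count, while floorDiv(cUb + cLb + 1, a) can undercount. A minimal standalone sketch of this (plain C++ with illustrative numbers, not MLIR code):

// Standalone illustration: extent of d under a*d + cLb >= 0 and -a*d + cUb >= 0.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Floor/ceil division for a positive divisor.
static int64_t floorDiv(int64_t n, int64_t d) {
  return n >= 0 ? n / d : -((-n + d - 1) / d);
}
static int64_t ceilDiv(int64_t n, int64_t d) {
  return n >= 0 ? (n + d - 1) / d : -(-n / d);
}

int main() {
  // Example: 4*d >= 0 and 4*d <= 5, i.e., d in {0, 1}.
  int64_t a = 4, cLb = 0, cUb = 5;
  int64_t exact = floorDiv(cUb, a) + floorDiv(cLb, a) + 1; // 2
  int64_t withCeil = ceilDiv(cUb + cLb + 1, a);            // 2: a valid bound
  int64_t withFloor = floorDiv(cUb + cLb + 1, a);          // 1: undercounts
  printf("exact=%lld ceil=%lld floor=%lld\n", (long long)exact,
         (long long)withCeil, (long long)withFloor);
  assert(withCeil >= exact);
  return 0;
}

The undercount in the floorDiv version is exactly the under-approximation exercised by the new dma-generate and memref-bound-check test cases below.
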
@@ -2664,28 +2709,33 @@ FlatAffineConstraints::unionBoundingBox(const FlatAffineConstraints &otherCst) {
   // To compute final new lower and upper bounds for the union.
   SmallVector<int64_t, 8> newLb(getNumCols()), newUb(getNumCols());
 
-  int64_t lbDivisor, otherLbDivisor;
+  int64_t lbFloorDivisor, otherLbFloorDivisor;
   for (unsigned d = 0, e = getNumDimIds(); d < e; ++d) {
-    auto extent = getConstantBoundOnDimSize(d, &lb, &lbDivisor);
+    auto extent = getConstantBoundOnDimSize(d, &lb, &lbFloorDivisor, &ub);
     if (!extent.hasValue())
       // TODO(bondhugula): symbolic extents when necessary.
       // TODO(bondhugula): handle union if a dimension is unbounded.
       return failure();
 
-    auto otherExtent =
-        other.getConstantBoundOnDimSize(d, &otherLb, &otherLbDivisor);
-    if (!otherExtent.hasValue() || lbDivisor != otherLbDivisor)
+    auto otherExtent = other.getConstantBoundOnDimSize(
+        d, &otherLb, &otherLbFloorDivisor, &otherUb);
+    if (!otherExtent.hasValue() || lbFloorDivisor != otherLbFloorDivisor)
       // TODO(bondhugula): symbolic extents when necessary.
       return failure();
 
-    assert(lbDivisor > 0 && "divisor always expected to be positive");
+    assert(lbFloorDivisor > 0 && "divisor always expected to be positive");
 
     auto res = compareBounds(lb, otherLb);
     // Identify min.
     if (res == BoundCmpResult::Less || res == BoundCmpResult::Equal) {
       minLb = lb;
+      // Since the divisor is for a floordiv, we need to convert to ceildiv,
+      // i.e., i >= expr floordiv div <=> i >= (expr - div + 1) ceildiv div <=>
+      // div * i >= expr - div + 1.
+      minLb.back() -= lbFloorDivisor - 1;
     } else if (res == BoundCmpResult::Greater) {
       minLb = otherLb;
+      minLb.back() -= otherLbFloorDivisor - 1;
     } else {
       // Uncomparable - check for constant lower/upper bounds.
       auto constLb = getConstantLowerBound(d);
@@ -2696,13 +2746,7 @@ FlatAffineConstraints::unionBoundingBox(const FlatAffineConstraints &otherCst) {
       minLb.back() = std::min(constLb.getValue(), constOtherLb.getValue());
     }
 
-    // Do the same for ub's but max of upper bounds.
-    ub = lb;
-    otherUb = otherLb;
-    ub.back() += extent.getValue() - 1;
-    otherUb.back() += otherExtent.getValue() - 1;
-
-    // Identify max.
+    // Do the same for ub's but max of upper bounds. Identify max.
     auto uRes = compareBounds(ub, otherUb);
     if (uRes == BoundCmpResult::Greater || uRes == BoundCmpResult::Equal) {
       maxUb = ub;
@@ -2723,8 +2767,8 @@ FlatAffineConstraints::unionBoundingBox(const FlatAffineConstraints &otherCst) {
     // The divisor for lb, ub, otherLb, otherUb at this point is lbDivisor,
    // and so it's the divisor for newLb and newUb as well.
-    newLb[d] = lbDivisor;
-    newUb[d] = -lbDivisor;
+    newLb[d] = lbFloorDivisor;
+    newUb[d] = -lbFloorDivisor;
     // Copy over the symbolic part + constant term.
     std::copy(minLb.begin(), minLb.end(), newLb.begin() + getNumDimIds());
     std::transform(newLb.begin() + getNumDimIds(), newLb.end(),
@@ -2741,6 +2785,9 @@ FlatAffineConstraints::unionBoundingBox(const FlatAffineConstraints &otherCst) {
     addInequality(boundingLbs[d]);
     addInequality(boundingUbs[d]);
   }
+
+  // TODO(mlir-team): copy over pure symbolic constraints from this and 'other'
+  // over to the union (since the above are just the union along dimensions); we
+  // shouldn't be discarding any other constraints on the symbols.
   return success();
 }
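
The minLb adjustment above applies the identity stated in the new comment: i >= expr floordiv div <=> i >= (expr - div + 1) ceildiv div <=> div * i >= expr - div + 1. With illustrative numbers div = 4 and expr = s0 evaluated at s0 = 10: floor(10 / 4) = 2, and 4*i >= 10 - 3 = 7 likewise forces i >= 2, whereas emitting 4*i >= 10 without the adjustment would require i >= 3 and wrongly shrink the bounding box.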

@@ -255,7 +255,7 @@ LogicalResult MemRefRegion::compute(Instruction *inst, unsigned loopDepth,
   if (sliceState != nullptr) {
     // Add dim and symbol slice operands.
     for (const auto &operand : sliceState->lbOperands[0]) {
-      cst.addDimOrSymbolId(const_cast<Value *>(operand));
+      cst.addInductionVarOrTerminalSymbol(const_cast<Value *>(operand));
     }
     // Add upper/lower bounds from 'sliceState' to 'cst'.
     LogicalResult ret =

@@ -105,8 +105,6 @@ static bool doubleBuffer(Value *oldMemRef, OpPointer<AffineForOp> forOp) {
   }
 
   // Create and place the alloc right before the 'for' instruction.
-  // TODO(mlir-team): we are assuming scoped allocation here, and aren't
-  // inserting a dealloc -- this isn't the right thing.
   Value *newMemRef =
       bOuter.create<AllocOp>(forInst->getLoc(), newMemRefType, allocOperands);

@@ -487,6 +487,38 @@ func @relative_loop_bounds(%arg0: memref<1027xf32>) {
 // ----
 
+// This should create a buffer of size 2 for %arg2.
+
+#map_lb = (d0) -> (d0)
+#map_ub = (d0) -> (d0 + 3)
+#map_acc = (d0) -> (d0 floordiv 8)
+// CHECK-LABEL: func @test_analysis_util
+func @test_analysis_util(%arg0: memref<4x4x16x1xf32>, %arg1: memref<144x9xf32>, %arg2: memref<2xf32>) -> (memref<144x9xf32>, memref<2xf32>) {
+  %c0 = constant 0 : index
+  %0 = alloc() : memref<64x1xf32>
+  %1 = alloc() : memref<144x4xf32>
+  %2 = constant 0.0 : f32
+  for %i8 = 0 to 9 step 3 {
+    for %i9 = #map_lb(%i8) to #map_ub(%i8) {
+      for %i17 = 0 to 64 {
+        %23 = affine.apply #map_acc(%i9)
+        %25 = load %arg2[%23] : memref<2xf32>
+        %26 = affine.apply #map_lb(%i17)
+        %27 = load %0[%26, %c0] : memref<64x1xf32>
+        store %27, %arg2[%23] : memref<2xf32>
+      }
+    }
+  }
+  return %arg1, %arg2 : memref<144x9xf32>, memref<2xf32>
+}
+// CHECK: for %i0 = 0 to 9 step 3 {
+// CHECK:   [[BUF:%[0-9]+]] = alloc() : memref<2xf32, 2>
+// CHECK:   dma_start %arg2[%4], [[BUF]]
+// CHECK:   dma_wait %6[%c0], %c2_0 : memref<1xi32>
+// CHECK:   for %i1 =
+
+// -----
+
 // Since the fast memory size is 4 KB, DMA generation will happen right under
 // %i0.
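
Why the new test expects a size-2 buffer for %arg2: the access is %arg2[%i9 floordiv 8] with %i9 in [%i8, %i8 + 3) and %i8 in {0, 3, 6}. Eliminating %i9 leaves (roughly) 8*d0 <= %i8 + 2 and 8*d0 >= %i8 - 7 on d0 = %i9 floordiv 8, so the extent is ceilDiv(2 + 7 + 1, 8) = 2; the maximum is reached at %i8 = 6, where %i9 takes the values 6, 7, 8 and touches both elements. The old floorDiv(10, 8) = 1 under-approximated this region.
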
@@ -515,7 +547,7 @@ func @load_store_same_memref(%arg0: memref<256x1024xf32>) {
   return
 }
 
-// ----
+// -----
 
 // This a 3-d loop nest tiled by 4 x 4 x 4. Under %i, %j, %k, the size of a
 // tile of arg0, arg1, and arg2 accessed is 4 KB (each), i.e., 12 KB in total.
@@ -557,9 +589,9 @@ func @simple_matmul(%arg0: memref<8x8xvector<64xf32>>, %arg1: memref<8x8xvector<
 // FAST-MEM-16KB: dma_wait
 // FAST-MEM-16KB: dma_start %arg1
 // FAST-MEM-16KB: dma_wait
-// FAST-MEM-16KB: for %i3 = #map2(%i0) to #map3(%i0) {
-// FAST-MEM-16KB-NEXT: for %i4 = #map2(%i1) to #map3(%i1) {
-// FAST-MEM-16KB-NEXT: for %i5 = #map2(%i2) to #map3(%i2) {
+// FAST-MEM-16KB: for %i3 = #map{{[0-9]+}}(%i0) to #map{{[0-9]+}}(%i0) {
+// FAST-MEM-16KB-NEXT: for %i4 = #map{{[0-9]+}}(%i1) to #map{{[0-9]+}}(%i1) {
+// FAST-MEM-16KB-NEXT: for %i5 = #map{{[0-9]+}}(%i2) to #map{{[0-9]+}}(%i2) {
 // FAST-MEM-16KB: }
 // FAST-MEM-16KB: }
 // FAST-MEM-16KB: }

@@ -266,3 +266,22 @@ func @test_floordiv_bound() {
   }
   return
 }
+
+// -----
+
+// This should not give an out of bounds error. The result of the affine.apply
+// is composed into the bound map during analysis.
+
+#map_lb = (d0) -> (d0)
+#map_ub = (d0) -> (d0 + 4)
+
+// CHECK-LABEL: func @non_composed_bound_operand
+func @non_composed_bound_operand(%arg0: memref<1024xf32>) {
+  for %i0 = 4 to 1028 step 4 {
+    %i1 = affine.apply (d0) -> (d0 - 4) (%i0)
+    for %i2 = #map_lb(%i1) to #map_ub(%i1) {
+      %0 = load %arg0[%i2] : memref<1024xf32>
+    }
+  }
+  return
+}
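
Why this access is in bounds once the bound map is fully composed: %i0 takes the values 4, 8, ..., 1024, so %i1 = %i0 - 4 ranges over 0, 4, ..., 1020 and %i2 in [%i1, %i1 + 4) = [%i0 - 4, %i0) stays within [0, 1023], inside memref<1024xf32>. Without the composition and canonicalization added in addLowerOrUpperBound, the affine.apply result %i1 reaches the constraint system as an opaque value with no relation to %i0, leaving %i2 effectively unbounded and producing the false positive this test guards against.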