Fix bug in memref region computation with slice loop bounds.

Adds loop IV values to ComputationSliceState; these are used in
FlatAffineConstraints::addSliceBounds to ensure that constraints are added
only for loop IV values that are present in the constraint system.

PiperOrigin-RevId: 235952912
Authored by MLIR Team on 2019-02-27 11:01:49 -08:00; committed by jpienaar
parent c6c534493d
commit c2766f3760
5 changed files with 88 additions and 22 deletions
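
In short: FlatAffineConstraints::addSliceBounds previously assumed the sliced
loop IVs occupied positions [0, lbMaps.size()) of the constraint system, but
identifiers can be projected out during memref region computation, so a slice
bound could land on the wrong identifier. The fix threads the IV values
through and looks each one up by value. A minimal standalone model of that
behavior (hypothetical types, not the MLIR API):

#include <cassert>
#include <optional>
#include <vector>

struct Bound { int lb, ub; };

struct ConstraintSystem {
  std::vector<int> idValues;                 // value attached to each column
  std::vector<std::optional<Bound>> bounds;  // one slot per column

  // Analogue of FlatAffineConstraints::findId: locate the column holding a
  // given value, if it is present at all.
  std::optional<unsigned> findId(int value) const {
    for (unsigned pos = 0; pos < idValues.size(); ++pos)
      if (idValues[pos] == value)
        return pos;
    return std::nullopt;
  }

  // Analogue of the fixed addSliceBounds: attach each bound to the column
  // found for its IV, and skip IVs that are not in the system, instead of
  // writing bound i into whatever identifier happens to sit at position i.
  bool addSliceBounds(const std::vector<int> &ivs,
                      const std::vector<Bound> &ivBounds) {
    assert(ivs.size() == ivBounds.size());
    for (unsigned i = 0, e = ivs.size(); i < e; ++i) {
      if (std::optional<unsigned> pos = findId(ivs[i]))
        bounds[*pos] = ivBounds[i];
      // else: IV was projected out; nothing to constrain.
    }
    return true;
  }
};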

View File

@@ -398,15 +398,14 @@ public:
                       SmallVectorImpl<AffineMap> *ubMaps);
 
   /// Adds slice lower bounds represented by lower bounds in 'lbMaps' and upper
-  /// bounds in 'ubMaps' to the constraint system. Note that both lower/upper
-  /// bounds share the same operand list 'operands'.
-  /// This function assumes that position 'lbMaps.size' == 'ubMaps.size',
-  /// and that positions [0, lbMaps.size) represent dimensional identifiers
-  /// which correspond to the loop IVs whose iteration bounds are being sliced.
+  /// bounds in 'ubMaps' to each identifier in the constraint system which has
+  /// a value in 'values'. Note that both lower/upper bounds share the same
+  /// operand list 'operands'.
+  /// This function assumes 'values.size' == 'lbMaps.size' == 'ubMaps.size'.
+  /// Note that both lower/upper bounds use operands from 'operands'.
   /// Returns true on success, returns false for unimplemented cases.
-  bool addSliceBounds(ArrayRef<AffineMap> lbMaps, ArrayRef<AffineMap> ubMaps,
-                      ArrayRef<Value *> operands);
+  bool addSliceBounds(ArrayRef<Value *> values, ArrayRef<AffineMap> lbMaps,
+                      ArrayRef<AffineMap> ubMaps, ArrayRef<Value *> operands);
 
   // Adds an inequality (>= 0) from the coefficients specified in inEq.
   void addInequality(ArrayRef<int64_t> inEq);
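
With the new signature, callers pass the sliced loop IVs alongside their bound
maps. A hypothetical call site (the names 'cst' and 'slice' are assumed, not
from this commit):

  // Bounds from 'slice.lbs[i]'/'slice.ubs[i]' are added only when
  // 'slice.ivs[i]' is found among the identifiers of 'cst'.
  if (!cst.addSliceBounds(slice.ivs, slice.lbs, slice.ubs,
                          slice.lbOperands[0]))
    return false; // unimplemented case, e.g. a semi-affine bound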

View File

@@ -62,11 +62,14 @@ void getLoopIVs(const Instruction &inst,
 /// surrounding this instruction.
 unsigned getNestingDepth(const Instruction &stmt);
 
-/// ComputationSliceState aggregates loop bound AffineMaps and their associated
-/// operands for a set of loops within a loop nest (typically the set of loops
-/// surrounding a store operation). Loop bound AffineMaps which are non-null
-/// represent slices of that loop's iteration space.
+/// ComputationSliceState aggregates loop IVs, loop bound AffineMaps and their
+/// associated operands for a set of loops within a loop nest (typically the
+/// set of loops surrounding a store operation). Loop bound AffineMaps which
+/// are non-null represent slices of that loop's iteration space.
 struct ComputationSliceState {
+  // List of sliced loop IVs (ordered from outermost to innermost).
+  // EX: 'ivs[i]' has lower bound 'lbs[i]' and upper bound 'ubs[i]'.
+  SmallVector<Value *, 4> ivs;
   // List of lower bound AffineMaps.
   SmallVector<AffineMap, 4> lbs;
   // List of upper bound AffineMaps.

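The vectors form parallel arrays: entry i of each describes the same sliced
loop. A sketch of a hypothetical helper that keeps them in sync (not part of
this commit; the operand lists are kept in step the same way):

  void appendSlicedLoop(ComputationSliceState &slice, Value *iv,
                        AffineMap lb, AffineMap ub) {
    slice.ivs.push_back(iv);
    slice.lbs.push_back(lb); // a null map leaves the lower bound unsliced
    slice.ubs.push_back(ub); // a null map leaves the upper bound unsliced
  }
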
View File

@@ -1488,17 +1488,22 @@ void FlatAffineConstraints::getSliceBounds(unsigned num, MLIRContext *context,
   }
 }
 
-// Adds slice lower/upper bounds from 'lbMaps'/'upMaps' to the constraint
-// system. This function assumes that position 'lbMaps.size' == 'ubMaps.size',
-// and that positions [0, lbMaps.size) represent dimensional identifiers which
-// correspond to the loop IVs whose iteration bounds are being sliced.
+// Adds slice lower bounds represented by lower bounds in 'lbMaps' and upper
+// bounds in 'ubMaps' to each value in `values' that appears in the constraint
+// system. Note that both lower/upper bounds share the same operand list
+// 'operands'.
+// This function assumes 'values.size' == 'lbMaps.size' == 'ubMaps.size', and
+// skips any null AffineMaps in 'lbMaps' or 'ubMaps'.
+// Note that both lower/upper bounds use operands from 'operands'.
 // Returns true on success. Returns false for unimplemented cases such as
 // semi-affine expressions or expressions with mod/floordiv.
-bool FlatAffineConstraints::addSliceBounds(ArrayRef<AffineMap> lbMaps,
+bool FlatAffineConstraints::addSliceBounds(ArrayRef<Value *> values,
+                                           ArrayRef<AffineMap> lbMaps,
                                            ArrayRef<AffineMap> ubMaps,
                                            ArrayRef<Value *> operands) {
+  assert(values.size() == lbMaps.size());
   assert(lbMaps.size() == ubMaps.size());
 
   // Record positions of the operands in the constraint system.
   SmallVector<unsigned, 8> positions;
   for (const auto &operand : operands) {
@@ -1510,6 +1515,7 @@ bool FlatAffineConstraints::addSliceBounds(ArrayRef<AffineMap> lbMaps,
   auto addLowerOrUpperBound = [&](unsigned pos, AffineMap boundMap,
                                   bool lower) -> bool {
+    assert(pos < getNumIds());
     FlatAffineConstraints localVarCst;
     std::vector<SmallVector<int64_t, 8>> flatExprs;
     if (!getFlattenedAffineExprs(boundMap, &flatExprs, &localVarCst)) {
@@ -1539,12 +1545,20 @@ bool FlatAffineConstraints::addSliceBounds(ArrayRef<AffineMap> lbMaps,
   };
 
   for (unsigned i = 0, e = lbMaps.size(); i < e; ++i) {
-    if (!addLowerOrUpperBound(i, lbMaps[i], /*lower=*/true))
-      return false;
-    if (!addLowerOrUpperBound(i, ubMaps[i], /*lower=*/false))
-      return false;
-  }
+    assert(lbMaps[i].getNumInputs() == operands.size());
+    assert(ubMaps[i].getNumInputs() == operands.size());
+    unsigned pos;
+    if (!findId(*values[i], &pos))
+      continue;
+
+    if (AffineMap lbMap = lbMaps[i])
+      if (!addLowerOrUpperBound(pos, lbMap, /*lower=*/true))
+        return false;
+    if (AffineMap ubMap = ubMaps[i])
+      if (!addLowerOrUpperBound(pos, ubMap, /*lower=*/false))
+        return false;
+  }
   return true;
 }
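
The findId lookup is the heart of the fix: each bound is attached to wherever
its IV actually lives in the constraint system, and IVs that are absent
(e.g. projected out) are skipped. A worked example with hypothetical
identifiers:

  // Suppose %i0 has been projected out, so the system's identifiers are
  // (%i1, %i2), while the slice carries ivs = [%i0, %i1, %i2]. The old code
  // would have added %i0's bounds at position 0, i.e. onto %i1. Now:
  //   i == 0: findId(%i0) fails           -> continue, nothing added
  //   i == 1: findId(%i1) yields pos == 0 -> bounds added for %i1
  //   i == 2: findId(%i2) yields pos == 1 -> bounds added for %i2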

View File

@@ -222,7 +222,7 @@ bool MemRefRegion::compute(Instruction *inst, unsigned loopDepth,
       }
     }
     // Add upper/lower bounds from 'sliceState' to 'cst'.
-    if (!cst.addSliceBounds(sliceState->lbs, sliceState->ubs,
+    if (!cst.addSliceBounds(sliceState->ivs, sliceState->lbs, sliceState->ubs,
                             sliceState->lbOperands[0]))
       return false;
   }
@@ -465,6 +465,9 @@ bool mlir::getBackwardComputationSliceState(const MemRefAccess &srcAccess,
   dependenceConstraints.projectOut(numSrcLoopIVs + dstLoopDepth,
                                    numDstLoopIVs - dstLoopDepth);
 
+  // Add src loop IV values to 'sliceState'.
+  dependenceConstraints.getIdValues(0, numSrcLoopIVs, &sliceState->ivs);
+
   // Set up lower/upper bound affine maps for the slice.
   sliceState->lbs.resize(numSrcLoopIVs, AffineMap());
   sliceState->ubs.resize(numSrcLoopIVs, AffineMap());
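
After these lines the slice state's arrays are index-aligned over the
numSrcLoopIVs source loops; a sketch of the resulting layout (IV names
hypothetical):

  //   sliceState->ivs: [%src_iv0, ..., %src_ivN]       (from the constraints)
  //   sliceState->lbs: [AffineMap(), ..., AffineMap()] (null until computed)
  //   sliceState->ubs: [AffineMap(), ..., AffineMap()] (null until computed)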

View File

@@ -1868,3 +1868,50 @@ func @slice_tile(%arg1: memref<32x8xf32>, %arg2: memref<32x8xf32>, %0 : f32) ->
 // CHECK-NEXT: }
 // CHECK-NEXT: return %arg1 : memref<32x8xf32>
 // CHECK-NEXT:}
+
+// -----
+
+// Test case which illustrates fix for b/126454413
+func @test_add_slice_bounds() {
+  %a = alloc() : memref<10xf32>
+  %b = alloc() : memref<10xf32>
+  %cf7 = constant 7.0 : f32
+  %c0 = constant 0 : index
+
+  for %i0 = 0 to 10 {
+    for %i1 = 0 to 10 {
+      for %i2 = 0 to 10 {
+        %a0 = affine.apply (d0) -> (d0) (%i0)
+        %a1 = affine.apply (d0) -> (d0) (%i0)
+        %a2 = affine.apply (d0, d1) -> (d0 - d1) (%a0, %a1)
+        store %cf7, %a[%a2] : memref<10xf32>
+      }
+    }
+  }
+  for %i3 = 0 to 10 {
+    for %i4 = 0 to 10 {
+      for %i5 = 0 to 10 {
+        %v0 = load %a[%c0] : memref<10xf32>
+      }
+    }
+  }
+
+// CHECK: for %i0 = 0 to 10 {
+// CHECK-NEXT: for %i1 = 0 to 10 {
+// CHECK-NEXT: for %i2 = 0 to 10 {
+// CHECK-NEXT: %2 = affine.apply #map2(%i0)
+// CHECK-NEXT: %3 = affine.apply #map2(%i0)
+// CHECK-NEXT: %4 = affine.apply #map3(%2, %3)
+// CHECK-NEXT: store %cst, %0[%4] : memref<10xf32>
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: for %i3 = 0 to 10 {
+// CHECK-NEXT: for %i4 = 0 to 10 {
+// CHECK-NEXT: for %i5 = 0 to 10 {
+// CHECK-NEXT: %5 = load %0[%c0] : memref<10xf32>
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+// CHECK-NEXT: }
+  return
+}