[mlir][bufferization] Set emitAccessorPrefix dialect flag

Generate get/set accessors on all bufferization ops. Also update all internal uses.

Differential Revision: https://reviews.llvm.org/D128057
commit 99260e9583
parent 745a4caaeb
Author: Matthias Springer
Date:   2022-06-18 10:23:31 +02:00

8 changed files with 58 additions and 56 deletions
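
In practice, the flag tells ODS to emit prefixed get/set accessors (getFoo()/setFoo()) for the ops' ODS-declared arguments, results, and attributes instead of the bare foo() form; the hunks below apply that rename at every internal call site. A minimal sketch of the effect at a call site, using bufferization.clone as in this patch; the helper function itself is hypothetical and not part of the change:

    #include "mlir/Dialect/Bufferization/IR/Bufferization.h"

    using namespace mlir;

    // Hypothetical helper: illustrates only the accessor rename.
    static void inspectClone(bufferization::CloneOp cloneOp) {
      Value src = cloneOp.getInput();  // was: cloneOp.input()
      Value dst = cloneOp.getOutput(); // was: cloneOp.output()
      (void)src; // suppress unused-variable warnings in this sketch
      (void)dst;
    }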


@@ -41,6 +41,7 @@ def Bufferization_Dialect : Dialect {
         kBufferLayoutAttrName = "bufferization.buffer_layout";
   }];
   let hasOperationAttrVerify = 1;
+  let emitAccessorPrefix = kEmitAccessorPrefix_Prefixed;
 }

 #endif // BUFFERIZATION_BASE


@@ -64,7 +64,7 @@ def Bufferization_AllocTensorOp : Bufferization_Op<"alloc_tensor",
    ```
  }];

-  let arguments = (ins Variadic<Index>:$dynamicSizes,
+  let arguments = (ins Variadic<Index>:$dynamic_sizes,
                        Optional<AnyTensor>:$copy,
                        OptionalAttr<BoolAttr>:$escape);
@@ -98,7 +98,7 @@ def Bufferization_AllocTensorOp : Bufferization_Op<"alloc_tensor",
     // the tensor at dimension `idx`. Asserts that the shape is
     // dynamic at that `idx`.
     unsigned getIndexOfDynamicSize(unsigned idx) {
-      assert(!copy() && "no dim sizes specified when copying a tensor");
+      assert(!getCopy() && "no dim sizes specified when copying a tensor");
       assert(isDynamicDim(idx) && "expected dynamic size");
       ArrayRef<int64_t> shape = getType().getShape();
       return std::count_if(
@@ -169,8 +169,8 @@ def Bufferization_CloneOp : Bufferization_Op<"clone", [
   let results = (outs Arg<AnyRankedOrUnrankedMemRef, "", []>:$output);

   let extraClassDeclaration = [{
-    Value getSource() { return input(); }
-    Value getTarget() { return output(); }
+    Value getSource() { return getInput(); }
+    Value getTarget() { return getOutput(); }
   }];

   let assemblyFormat = "$input attr-dict `:` type($input) `to` type($output)";


@@ -57,7 +57,8 @@ struct CloneOpConversion : public OpConversionPattern<bufferization::CloneOp> {
       if (!memrefType.isDynamicDim(i))
         continue;
       Value size = rewriter.createOrFold<arith::ConstantIndexOp>(loc, i);
-      Value dim = rewriter.createOrFold<memref::DimOp>(loc, op.input(), size);
+      Value dim =
+          rewriter.createOrFold<memref::DimOp>(loc, op.getInput(), size);
       dynamicOperands.push_back(dim);
     }
@@ -68,7 +68,7 @@ struct CloneOpConversion : public OpConversionPattern<bufferization::CloneOp> {
     if (memrefType != allocType)
       alloc = rewriter.create<memref::CastOp>(op->getLoc(), memrefType, alloc);
     rewriter.replaceOp(op, alloc);
-    rewriter.create<memref::CopyOp>(loc, op.input(), alloc);
+    rewriter.create<memref::CopyOp>(loc, op.getInput(), alloc);
     return success();
   }
 };


@@ -484,7 +484,7 @@ Value bufferization::getBuffer(RewriterBase &rewriter, Value value,
   // Replace "%t = to_tensor %m" with %m.
   if (auto toTensorOp = value.getDefiningOp<bufferization::ToTensorOp>())
-    return toTensorOp.memref();
+    return toTensorOp.getMemref();

   // Insert to_memref op.
   OpBuilder::InsertionGuard g(rewriter);
@@ -502,7 +502,7 @@ bufferization::getBufferType(Value value, const BufferizationOptions &options) {
   assert(tensorType && "unexpected non-tensor type");
   if (auto toTensorOp = value.getDefiningOp<bufferization::ToTensorOp>())
-    return toTensorOp.memref().getType().cast<BaseMemRefType>();
+    return toTensorOp.getMemref().getType().cast<BaseMemRefType>();
   return getMemRefType(tensorType, options);
 }


@@ -84,11 +84,11 @@ mlir::bufferization::castOrReallocMemRefValue(OpBuilder &b, Value value,
 /// to_memref op are different, a memref.cast is needed.
 LogicalResult mlir::bufferization::foldToMemrefToTensorPair(
     RewriterBase &rewriter, ToMemrefOp toMemref, bool allowSameType) {
-  auto memrefToTensor = toMemref.tensor().getDefiningOp<ToTensorOp>();
+  auto memrefToTensor = toMemref.getTensor().getDefiningOp<ToTensorOp>();
   if (!memrefToTensor)
     return failure();

-  Type srcType = memrefToTensor.memref().getType();
+  Type srcType = memrefToTensor.getMemref().getType();
   Type destType = toMemref.getType();

   // Directly rewrite if the type did not change.
@@ -96,7 +96,7 @@ LogicalResult mlir::bufferization::foldToMemrefToTensorPair(
   // Function can be configured to only handle cases where a cast is needed.
   if (!allowSameType)
     return failure();
-  rewriter.replaceOp(toMemref, memrefToTensor.memref());
+  rewriter.replaceOp(toMemref, memrefToTensor.getMemref());
   return success();
 }
@@ -107,7 +107,7 @@ LogicalResult mlir::bufferization::foldToMemrefToTensorPair(
   // Ranked memref -> Ranked memref cast.
   if (rankedSrcType && rankedDestType) {
     FailureOr<Value> replacement = castOrReallocMemRefValue(
-        rewriter, memrefToTensor.memref(), rankedDestType);
+        rewriter, memrefToTensor.getMemref(), rankedDestType);
     if (failed(replacement))
       return failure();
@@ -125,7 +125,7 @@ LogicalResult mlir::bufferization::foldToMemrefToTensorPair(
   assert(memref::CastOp::areCastCompatible(srcType, destType) &&
          "expected that types are cast compatible");
   rewriter.replaceOpWithNewOp<memref::CastOp>(toMemref, destType,
-                                              memrefToTensor.memref());
+                                              memrefToTensor.getMemref());
   return success();
 }
@@ -162,12 +162,12 @@ LogicalResult AllocTensorOp::bufferize(RewriterBase &rewriter,
   // Create buffer allocation.
   Value copyBuffer;
-  if (copy())
-    copyBuffer = getBuffer(rewriter, copy(), options);
+  if (getCopy())
+    copyBuffer = getBuffer(rewriter, getCopy(), options);
   auto allocType =
       MemRefType::get(getType().getShape(), getType().getElementType());
-  SmallVector<Value> dynamicDims = dynamicSizes();
-  if (copy()) {
+  SmallVector<Value> dynamicDims = getDynamicSizes();
+  if (getCopy()) {
     assert(dynamicDims.empty() && "expected either `copy` or `dynamicDims`");
     populateDynamicDimSizes(rewriter, loc, copyBuffer, dynamicDims);
   }
@@ -177,7 +177,7 @@ LogicalResult AllocTensorOp::bufferize(RewriterBase &rewriter,
     return failure();

   // Create memory copy (if any).
-  if (copy()) {
+  if (getCopy()) {
     if (failed(options.createMemCpy(rewriter, loc, copyBuffer, *alloc)))
       return failure();
   }
@@ -185,8 +185,8 @@ LogicalResult AllocTensorOp::bufferize(RewriterBase &rewriter,
   // Should the buffer be deallocated?
   AnalysisState analysisState(options);
   bool dealloc;
-  if (escape().hasValue()) {
-    dealloc = !*escape();
+  if (getEscape().hasValue()) {
+    dealloc = !*getEscape();
   } else {
     // No "escape" annotation found.
     if (options.createDeallocs) {
@@ -213,7 +213,7 @@ LogicalResult AllocTensorOp::bufferize(RewriterBase &rewriter,
 bool AllocTensorOp::isMemoryWrite(OpResult opResult,
                                   const AnalysisState &state) {
   // AllocTensorOps do not write unless they have a `copy` value.
-  return static_cast<bool>(copy());
+  return static_cast<bool>(getCopy());
 }

 bool AllocTensorOp::bufferizesToMemoryRead(OpOperand &opOperand,
@@ -238,13 +238,13 @@ AllocTensorOp::getAliasingOpResult(OpOperand &opOperand,
 }

 LogicalResult AllocTensorOp::verify() {
-  if (copy() && !dynamicSizes().empty())
+  if (getCopy() && !getDynamicSizes().empty())
     return emitError("dynamic sizes not needed when copying a tensor");
-  if (!copy() && getType().getNumDynamicDims() !=
-                     static_cast<int64_t>(dynamicSizes().size()))
+  if (!getCopy() && getType().getNumDynamicDims() !=
+                        static_cast<int64_t>(getDynamicSizes().size()))
     return emitError("expected ")
            << getType().getNumDynamicDims() << " dynamic sizes";
-  if (copy() && copy().getType() != getType())
+  if (getCopy() && getCopy().getType() != getType())
     return emitError("expected that `copy` and return type match");
   return success();
 }
@@ -284,7 +284,7 @@ struct ReplaceStaticShapeDims : OpRewritePattern<AllocTensorOp> {
   LogicalResult matchAndRewrite(AllocTensorOp op,
                                 PatternRewriter &rewriter) const override {
-    if (op.copy())
+    if (op.getCopy())
       return failure();
     SmallVector<int64_t> newShape = llvm::to_vector(op.getType().getShape());
     SmallVector<Value> newDynamicSizes;
@@ -292,7 +292,7 @@ struct ReplaceStaticShapeDims : OpRewritePattern<AllocTensorOp> {
     for (int64_t i = 0; i < op.getType().getRank(); ++i) {
       if (!op.isDynamicDim(i))
         continue;
-      Value value = op.dynamicSizes()[dynValCounter++];
+      Value value = op.getDynamicSizes()[dynValCounter++];
       APInt intVal;
       if (matchPattern(value, m_ConstantInt(&intVal))) {
         newShape[i] = intVal.getSExtValue();
@@ -306,7 +306,7 @@ struct ReplaceStaticShapeDims : OpRewritePattern<AllocTensorOp> {
       return failure();
     auto newOp = rewriter.create<AllocTensorOp>(
         op.getLoc(), newType, newDynamicSizes, /*copy=*/Value(),
-        /*escape=*/op.escapeAttr());
+        /*escape=*/op.getEscapeAttr());
     rewriter.replaceOpWithNewOp<tensor::CastOp>(op, op.getType(), newOp);
     return success();
   }
@@ -381,13 +381,13 @@ ParseResult AllocTensorOp::parse(OpAsmParser &parser, OperationState &result) {
 }

 void AllocTensorOp::print(OpAsmPrinter &p) {
-  p << "(" << dynamicSizes() << ")";
-  if (copy())
-    p << " copy(" << copy() << ")";
+  p << "(" << getDynamicSizes() << ")";
+  if (getCopy())
+    p << " copy(" << getCopy() << ")";
   p.printOptionalAttrDict((*this)->getAttrs(), /*elidedAttrs=*/{
       AllocTensorOp::getOperandSegmentSizeAttr()});
   p << " : ";
-  auto type = result().getType();
+  auto type = getResult().getType();
   if (auto validType = type.dyn_cast<::mlir::TensorType>())
     p.printStrippedAttrOrType(validType);
   else
@@ -396,8 +396,8 @@ void AllocTensorOp::print(OpAsmPrinter &p) {
 Value AllocTensorOp::getDynamicSize(OpBuilder &b, unsigned idx) {
   assert(isDynamicDim(idx) && "expected dynamic dim");
-  if (copy())
-    return b.create<tensor::DimOp>(getLoc(), copy(), idx);
+  if (getCopy())
+    return b.create<tensor::DimOp>(getLoc(), getCopy(), idx);
   return getOperand(getIndexOfDynamicSize(idx));
 }
@@ -408,11 +408,11 @@ Value AllocTensorOp::getDynamicSize(OpBuilder &b, unsigned idx) {
 void CloneOp::getEffects(
     SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
         &effects) {
-  effects.emplace_back(MemoryEffects::Read::get(), input(),
+  effects.emplace_back(MemoryEffects::Read::get(), getInput(),
                        SideEffects::DefaultResource::get());
-  effects.emplace_back(MemoryEffects::Write::get(), output(),
+  effects.emplace_back(MemoryEffects::Write::get(), getOutput(),
                        SideEffects::DefaultResource::get());
-  effects.emplace_back(MemoryEffects::Allocate::get(), output(),
+  effects.emplace_back(MemoryEffects::Allocate::get(), getOutput(),
                        SideEffects::DefaultResource::get());
 }
@@ -434,13 +434,13 @@ struct SimplifyClones : public OpRewritePattern<CloneOp> {
       return success();
     }

-    Value source = cloneOp.input();
+    Value source = cloneOp.getInput();

     // This only finds dealloc operations for the immediate value. It should
     // also consider aliases. That would also make the safety check below
     // redundant.
     llvm::Optional<Operation *> maybeCloneDeallocOp =
-        memref::findDealloc(cloneOp.output());
+        memref::findDealloc(cloneOp.getOutput());
     // Skip if either of them has > 1 deallocate operations.
     if (!maybeCloneDeallocOp.hasValue())
       return failure();
@@ -501,12 +501,12 @@ void CloneOp::getCanonicalizationPatterns(RewritePatternSet &results,
 //===----------------------------------------------------------------------===//

 OpFoldResult ToTensorOp::fold(ArrayRef<Attribute>) {
-  if (auto toMemref = memref().getDefiningOp<ToMemrefOp>())
+  if (auto toMemref = getMemref().getDefiningOp<ToMemrefOp>())
     // Approximate alias analysis by conservatively folding only when no there
     // is no interleaved operation.
     if (toMemref->getBlock() == this->getOperation()->getBlock() &&
         toMemref->getNextNode() == this->getOperation())
-      return toMemref.tensor();
+      return toMemref.getTensor();
   return {};
 }
@@ -521,8 +521,8 @@ struct DimOfToTensorFolder : public OpRewritePattern<tensor::DimOp> {
     if (!memrefToTensorOp)
       return failure();

-    rewriter.replaceOpWithNewOp<memref::DimOp>(dimOp, memrefToTensorOp.memref(),
-                                               dimOp.index());
+    rewriter.replaceOpWithNewOp<memref::DimOp>(
+        dimOp, memrefToTensorOp.getMemref(), dimOp.index());
     return success();
   }
 };
@@ -539,9 +539,9 @@ void ToTensorOp::getCanonicalizationPatterns(RewritePatternSet &results,
 //===----------------------------------------------------------------------===//

 OpFoldResult ToMemrefOp::fold(ArrayRef<Attribute>) {
-  if (auto memrefToTensor = tensor().getDefiningOp<ToTensorOp>())
-    if (memrefToTensor.memref().getType() == getType())
-      return memrefToTensor.memref();
+  if (auto memrefToTensor = getTensor().getDefiningOp<ToTensorOp>())
+    if (memrefToTensor.getMemref().getType() == getType())
+      return memrefToTensor.getMemref();
   return {};
 }
@@ -596,7 +596,7 @@ struct LoadOfToMemref : public OpRewritePattern<memref::LoadOp> {
     if (!toMemref)
       return failure();

-    rewriter.replaceOpWithNewOp<tensor::ExtractOp>(load, toMemref.tensor(),
+    rewriter.replaceOpWithNewOp<tensor::ExtractOp>(load, toMemref.getTensor(),
                                                    load.indices());
     return success();
   }


@@ -92,7 +92,7 @@ public:
   LogicalResult
   matchAndRewrite(bufferization::ToTensorOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    rewriter.replaceOp(op, adaptor.memref());
+    rewriter.replaceOp(op, adaptor.getMemref());
     return success();
   }
 };
@@ -108,7 +108,7 @@ public:
   LogicalResult
   matchAndRewrite(bufferization::ToMemrefOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    rewriter.replaceOp(op, adaptor.tensor());
+    rewriter.replaceOp(op, adaptor.getTensor());
     return success();
   }
 };


@@ -52,11 +52,11 @@ mlir::bufferization::insertTensorCopies(Operation *op,
     // Find AllocTensorOps without an `escape` attribute and add the attribute
     // based on analysis results.
     if (auto allocTensorOp = dyn_cast<AllocTensorOp>(op)) {
-      if (allocTensorOp.escape())
+      if (allocTensorOp.getEscape())
         return WalkResult::advance();
       bool escape = !state.getOptions().createDeallocs ||
-                    state.isTensorYielded(allocTensorOp.result());
-      allocTensorOp.escapeAttr(rewriter.getBoolAttr(escape));
+                    state.isTensorYielded(allocTensorOp.getResult());
+      allocTensorOp.setEscapeAttr(rewriter.getBoolAttr(escape));
       return WalkResult::advance();
     }


@@ -986,7 +986,7 @@ struct LastTensorLoadCanonicalization : public OpRewritePattern<ForOp> {
       if (!isTensor || !tensorLoadOp || (!bbArg.use_empty() && !tensorToMemref))
        continue;
       // If tensorToMemref is present, it must feed into the `ToTensorOp`.
-      if (tensorToMemref && tensorLoadOp.memref() != tensorToMemref)
+      if (tensorToMemref && tensorLoadOp.getMemref() != tensorToMemref)
         continue;
       // TODO: Any aliasing write of tensorLoadOp.memref() nested under `forOp`
       // must be before `ToTensorOp` in the block so that the lastWrite
@@ -1000,14 +1000,14 @@ struct LastTensorLoadCanonicalization : public OpRewritePattern<ForOp> {
       if (tensorToMemref) {
         rewriter.setInsertionPoint(forOp);
         rewriter.replaceOpWithNewOp<bufferization::ToMemrefOp>(
-            tensorToMemref, tensorToMemref.memref().getType(),
-            tensorToMemref.tensor());
+            tensorToMemref, tensorToMemref.getMemref().getType(),
+            tensorToMemref.getTensor());
       }
       // Clone the tensorLoad after forOp.
       rewriter.setInsertionPointAfter(forOp);
       Value newTensorLoad = rewriter.create<bufferization::ToTensorOp>(
-          loc, tensorLoadOp.memref());
+          loc, tensorLoadOp.getMemref());
       Value forOpResult = forOp.getResult(bbArg.getArgNumber() - /*iv=*/1);
       replacements.insert(std::make_pair(forOpResult, newTensorLoad));