[mlir][bufferization] Add an option to use memref types without layout maps
This is for compatibility with existing bufferization passes. Also clean up memref type generation a bit.

Differential Revision: https://reviews.llvm.org/D118243
commit 268524238e (parent 1f3aa2af9d)
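In short: with `fullyDynamicLayoutMaps` left at its default of true, bufferization produces ranked memref types whose offset and strides are fully dynamic; setting it to false produces memref types with no layout map. A minimal MLIR sketch of the two results for the same 1-D tensor, using the affine map the updated tests bind to #[[$MAP]]:

// Default: fully dynamic offset and strides.
#map = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
func @with_layout_map(%m: memref<?xf32, #map>) { return }

// fully-dynamic-layout-maps=0: no layout map at all.
func @without_layout_map(%m: memref<?xf32>) { return }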
@@ -98,6 +98,10 @@ struct BufferizationOptions {
   /// Should be used only with `testAnalysisOnly = true`.
   unsigned analysisFuzzerSeed = 0;
 
+  /// Specifies whether fully dynamic layout maps should be used on ranked
+  /// MemRef types. If false, MemRef types will have no layout maps.
+  bool fullyDynamicLayoutMaps = true;
+
   /// If set to `true`, does not modify the IR apart from adding attributes (for
   /// checking the results of the analysis) and post analysis steps.
   bool testAnalysisOnly = false;
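At a function boundary the option plays out as below; a sketch mirroring the CHECK/CHECK-NO-LAYOUT-MAP lines in the tests at the end of this diff:

// Input:  func @fill_inplace(%A: tensor<?xf32> {linalg.inplaceable = true}) -> tensor<?xf32>
// fullyDynamicLayoutMaps = true (default):
//   func @fill_inplace(%arg0: memref<?xf32, #map_1d_dyn>)
// fullyDynamicLayoutMaps = false:
//   func @fill_inplace(%arg0: memref<?xf32>)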
@@ -282,21 +286,17 @@ OpTy replaceOpWithNewBufferizedOp(RewriterBase &rewriter, Operation *op,
 }
 
 /// Return a contiguous MemRefType (i.e. with canonical/empty layout map)
-/// with the same shape as `shapedType` and specified `layout` and
-/// `addressSpace`.
+/// with the same shape as `shapedType` and specified `addressSpace`.
 MemRefType getContiguousMemRefType(ShapedType shapedType,
-                                   MemRefLayoutAttrInterface layout = {},
                                    Attribute memorySpace = {});
 
-/// Return an UnrankedMemRefType with the given element type and memory space.
-UnrankedMemRefType getUnrankedMemRefType(Type elementType,
-                                         Attribute memorySpace = {});
-
 /// Return a MemRefType to which the `tensorType` can be bufferized in a
 /// composable fashion. The layout must be the most dynamic possible and
 /// canonicalize away once bufferization is finished.
-MemRefType getDynamicMemRefType(RankedTensorType tensorType,
-                                unsigned addressSpace = 0);
+BaseMemRefType getMemRefType(TensorType tensorType,
+                             const BufferizationOptions &options,
+                             MemRefLayoutAttrInterface layout = {},
+                             Attribute memorySpace = {});
 
 /// Creates a memref allocation with the given type and dynamic extents.
 FailureOr<Value> createAlloc(OpBuilder &b, Location loc, MemRefType type,
@@ -55,6 +55,9 @@ def LinalgComprehensiveModuleBufferize :
     Option<"useLinalgCopy", "use-linalg-copy", "bool",
            /*default=*/"false",
            "Use a copy operation implemented as a Linalg op.">,
+    Option<"fullyDynamicLayoutMaps", "fully-dynamic-layout-maps", "bool",
+           /*default=*/"true",
+           "Generate MemRef types with dynamic offset+strides by default.">,
    Option<"analysisFuzzerSeed", "analysis-fuzzer-seed", "unsigned",
           /*default=*/"0",
           "Analyze ops in random order with a given seed (fuzzer)">,
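On the command line, the new Passes.td entry surfaces as a string pass option; for instance (matching the RUN lines added to the tests below):

// RUN: mlir-opt %s -linalg-comprehensive-module-bufferize="allow-return-memref fully-dynamic-layout-maps=0" -split-input-file | FileCheck %s --check-prefix=CHECK-NO-LAYOUT-MAP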
@@ -210,8 +210,10 @@ static void ensureToMemrefOpIsValid(Value tensor, Type memrefType) {
 #endif
 }
 
-static Value lookupBuffer(RewriterBase &rewriter, Value tensor) {
-  assert(tensor.getType().isa<TensorType>() && "unexpected non-tensor type");
+static Value lookupBuffer(RewriterBase &rewriter, Value tensor,
+                          const BufferizationOptions &options) {
+  auto tensorType = tensor.getType().dyn_cast<TensorType>();
+  assert(tensorType && "unexpected non-tensor type");
 
   // Replace "%t = to_tensor %m" with %m.
   if (auto toTensorOp = tensor.getDefiningOp<bufferization::ToTensorOp>())
@@ -220,13 +222,7 @@ static Value lookupBuffer(RewriterBase &rewriter, Value tensor) {
   // Insert to_memref op.
   OpBuilder::InsertionGuard g(rewriter);
   setInsertionPointAfter(rewriter, tensor);
-  Type memrefType;
-  if (auto rankedTensorType = tensor.getType().dyn_cast<RankedTensorType>()) {
-    memrefType = getDynamicMemRefType(rankedTensorType);
-  } else {
-    memrefType = getUnrankedMemRefType(
-        tensor.getType().cast<TensorType>().getElementType());
-  }
+  Type memrefType = getMemRefType(tensorType, options);
   ensureToMemrefOpIsValid(tensor, memrefType);
   return rewriter.create<bufferization::ToMemrefOp>(tensor.getLoc(), memrefType,
                                                     tensor);
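The to_memref materialization now takes its type from the options as well; both resulting forms show up in the updated CHECK lines further down:

// Default:                      %m = bufferization.to_memref %t : memref<?xf32, #map>
// fully-dynamic-layout-maps=0:  %m = bufferization.to_memref %t : memref<?xf32>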
@@ -242,7 +238,7 @@ FailureOr<Value> BufferizationState::getBuffer(
   Operation *op = opOperand.getOwner();
   Location loc = op->getLoc();
   Value operand = opOperand.get();
-  Value operandBuffer = lookupBuffer(rewriter, operand);
+  Value operandBuffer = lookupBuffer(rewriter, operand, options);
 
   if (forceInPlace || isInPlace(opOperand))
     return operandBuffer;
@@ -513,27 +509,43 @@ bool bufferization::isFunctionArgument(Value value) {
   return isa<FuncOp>(bbArg.getOwner()->getParentOp());
 }
 
-MemRefType
-bufferization::getContiguousMemRefType(ShapedType shapedType,
-                                       MemRefLayoutAttrInterface layout,
-                                       Attribute memorySpace) {
+MemRefType bufferization::getContiguousMemRefType(ShapedType shapedType,
+                                                  Attribute memorySpace) {
+  MemRefLayoutAttrInterface layout = {};
   return MemRefType::get(shapedType.getShape(), shapedType.getElementType(),
                          layout, memorySpace);
 }
 
-UnrankedMemRefType bufferization::getUnrankedMemRefType(Type elementType,
-                                                        Attribute memorySpace) {
-  return UnrankedMemRefType::get(elementType, memorySpace);
-}
-
-MemRefType bufferization::getDynamicMemRefType(RankedTensorType tensorType,
-                                               unsigned addressSpace) {
+BaseMemRefType bufferization::getMemRefType(TensorType tensorType,
+                                            const BufferizationOptions &options,
+                                            MemRefLayoutAttrInterface layout,
+                                            Attribute memorySpace) {
+  // Case 1: Unranked memref type.
+  if (auto unrankedTensorType = tensorType.dyn_cast<UnrankedTensorType>()) {
+    assert(!layout && "UnrankedTensorType cannot have a layout map");
+    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
+                                   memorySpace);
+  }
+
+  // Case 2: Ranked memref type with specified layout. If fully dynamic layout
+  // maps are not requested, generate a type with `layout`, which is empty (no
+  // layout map) by default.
+  auto rankedTensorType = tensorType.cast<RankedTensorType>();
+  if (layout || !options.fullyDynamicLayoutMaps) {
+    return MemRefType::get(rankedTensorType.getShape(),
+                           rankedTensorType.getElementType(), layout,
+                           memorySpace);
+  }
+
+  // Case 3: Ranked memref type with unspecified layout. Choose the most dynamic
+  // one.
   // TODO: address space decisions to connect with the actual alloc.
   int64_t dynamicOffset = ShapedType::kDynamicStrideOrOffset;
-  SmallVector<int64_t> dynamicStrides(tensorType.getRank(),
+  SmallVector<int64_t> dynamicStrides(rankedTensorType.getRank(),
                                       ShapedType::kDynamicStrideOrOffset);
   AffineMap stridedLayout = makeStridedLinearLayoutMap(
-      dynamicStrides, dynamicOffset, tensorType.getContext());
-  return MemRefType::get(tensorType.getShape(), tensorType.getElementType(),
-                         stridedLayout, addressSpace);
+      dynamicStrides, dynamicOffset, rankedTensorType.getContext());
+  return MemRefType::get(rankedTensorType.getShape(),
+                         rankedTensorType.getElementType(), stridedLayout,
+                         memorySpace);
 }
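Spelled out on concrete types, the three cases of the new getMemRefType map as follows (a sketch; the case 3 map is the one the tests bind to #[[$MAP]]):

// Case 1 (unranked):                         tensor<*xf32> -> memref<*xf32>
// Case 2 (layout given, or no dynamic maps): tensor<?xf32> -> memref<?xf32>
// Case 3 (default, most dynamic layout):
//   tensor<?xf32> -> memref<?xf32, affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>>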
@@ -308,16 +308,15 @@ static FuncOp getCalledFunction(CallOpInterface callOp) {
 /// dynamic buffer type supported.
 /// A later pass across all CallOps in the module can decide whether to simplify
 /// the types of to version according to some cost model.
-static FunctionType getBufferizedFunctionType(MLIRContext *ctx,
-                                              TypeRange argumentTypes,
-                                              TypeRange resultTypes) {
-  auto rewrite = [](Type t) -> Type {
+static FunctionType
+getBufferizedFunctionType(MLIRContext *ctx, TypeRange argumentTypes,
+                          TypeRange resultTypes,
+                          const BufferizationOptions &options) {
+  auto rewrite = [&](Type t) -> Type {
     // TODO: non-zero address space.
     // TODO: layout information if relevant.
-    if (auto rankedTensorType = t.dyn_cast<RankedTensorType>())
-      return getDynamicMemRefType(rankedTensorType);
     if (auto tensorType = t.dyn_cast<TensorType>())
-      return getUnrankedMemRefType(tensorType.getElementType());
+      return getMemRefType(tensorType, options);
     return t;
   };
   auto argTypes = llvm::to_vector<4>(llvm::map_range(argumentTypes, rewrite));
@@ -398,7 +397,8 @@ static LogicalResult bufferizeFuncOpBoundary(FuncOp funcOp,
       return funcOp->emitError() << "cannot bufferize bodiless function that "
                                  << "returns a tensor";
     FunctionType bufferizedFuncType = getBufferizedFunctionType(
-        funcOp.getContext(), funcOp.getType().getInputs(), TypeRange{});
+        funcOp.getContext(), funcOp.getType().getInputs(), TypeRange{},
+        state.getOptions());
     funcOp.setType(bufferizedFuncType);
     return success();
   }
@@ -431,7 +431,8 @@ static LogicalResult bufferizeFuncOpBoundary(FuncOp funcOp,
   // 2. Rewrite the terminator without the inPlace bufferizable values.
   ValueRange retValues{returnValues};
   FunctionType bufferizedFuncType = getBufferizedFunctionType(
-      funcOp.getContext(), funcOp.getType().getInputs(), retValues.getTypes());
+      funcOp.getContext(), funcOp.getType().getInputs(), retValues.getTypes(),
+      state.getOptions());
   OpBuilder b(returnOp);
   b.create<ReturnOp>(returnOp.getLoc(), returnValues);
   returnOp->erase();
@@ -822,7 +823,7 @@ struct CallOpInterface
     // Get the bufferized FunctionType for funcOp or construct it if not yet
     // available.
     FunctionType bufferizedFuncType = getBufferizedFunctionType(
-        funcOp.getContext(), argumentTypes, resultTypes);
+        funcOp.getContext(), argumentTypes, resultTypes, state.getOptions());
 
     // 3. Rewrite tensor operands as memrefs based on `bufferizedFuncType`.
     for (OpOperand &opOperand : callOp->getOpOperands()) {
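For call boundaries, every tensor in the FunctionType goes through the same helper; a sketch of one signature under both settings:

// (tensor<?xf32>) -> tensor<?xf32> bufferizes to:
//   default:                      (memref<?xf32, #map>) -> memref<?xf32, #map>
//   fully-dynamic-layout-maps=0:  (memref<?xf32>) -> memref<?xf32>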
@@ -74,11 +74,8 @@ struct ExecuteRegionOpInterface
     // Compute new result types.
     SmallVector<Type> newResultTypes;
     for (Type type : executeRegionOp->getResultTypes()) {
-      if (auto rankedTensorType = type.dyn_cast<RankedTensorType>()) {
-        newResultTypes.push_back(getDynamicMemRefType(rankedTensorType));
-      } else if (auto tensorType = type.dyn_cast<TensorType>()) {
-        newResultTypes.push_back(
-            getUnrankedMemRefType(tensorType.getElementType()));
+      if (auto tensorType = type.dyn_cast<TensorType>()) {
+        newResultTypes.push_back(getMemRefType(tensorType, state.getOptions()));
       } else {
         newResultTypes.push_back(type);
       }
@@ -186,11 +183,8 @@ struct IfOpInterface
     // Compute new types of the bufferized scf.if op.
     SmallVector<Type> newTypes;
     for (Type returnType : ifOp->getResultTypes()) {
-      if (returnType.isa<TensorType>()) {
-        assert(returnType.isa<RankedTensorType>() &&
-               "unsupported unranked tensor");
-        newTypes.push_back(
-            getDynamicMemRefType(returnType.cast<RankedTensorType>()));
+      if (auto tensorType = returnType.dyn_cast<TensorType>()) {
+        newTypes.push_back(getMemRefType(tensorType, state.getOptions()));
       } else {
         newTypes.push_back(returnType);
       }
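A side effect of routing scf.if results through getMemRefType, as far as this hunk shows, is that the old ranked-only assertion is gone; a hypothetical sketch:

// %r = scf.if %c -> (tensor<*xf32>) {...}  // previously tripped "unsupported unranked tensor"
// now yields a result of type memref<*xf32>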
@@ -120,6 +120,7 @@ void LinalgComprehensiveModuleBufferize::runOnOperation() {
   options->allowUnknownOps = allowUnknownOps;
   options->analysisFuzzerSeed = analysisFuzzerSeed;
   options->createDeallocs = createDeallocs;
+  options->fullyDynamicLayoutMaps = fullyDynamicLayoutMaps;
   options->printConflicts = printConflicts;
   options->testAnalysisOnly = testAnalysisOnly;
 
@@ -65,14 +65,8 @@ struct CastOpInterface
       layout = rankedMemRefType.getLayout();
 
     // Compute the new memref type.
-    Type resultMemRefType;
-    if (resultTensorType.isa<RankedTensorType>()) {
-      resultMemRefType =
-          getContiguousMemRefType(resultTensorType, layout, memorySpace);
-    } else {
-      resultMemRefType =
-          getUnrankedMemRefType(resultTensorType.getElementType(), memorySpace);
-    }
+    Type resultMemRefType = getMemRefType(resultTensorType, state.getOptions(),
+                                          layout, memorySpace);
 
     // Replace the op with a memref.cast.
     assert(memref::CastOp::areCastCompatible(resultBuffer->getType(),
@@ -263,8 +257,7 @@ struct FromElementsOpInterface
     Location loc = op->getLoc();
     auto tensorType = fromElementsOp.getType().cast<RankedTensorType>();
     auto shape = tensorType.getShape();
-    MemRefType resultType =
-        MemRefType::get(tensorType.getShape(), tensorType.getElementType());
+    MemRefType resultType = getContiguousMemRefType(tensorType);
     FailureOr<Value> maybeBuffer =
         createAlloc(rewriter, loc, resultType, {},
                     /*deallocMemref=*/state.getOptions().createDeallocs,
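tensor.from_elements always materializes a fresh allocation, so it keeps a contiguous (identity-layout) type regardless of the option; a sketch:

// %t = tensor.from_elements %a, %b : tensor<2xf32>
// allocates memref<2xf32> (canonical/empty layout map)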
@@ -1,5 +1,8 @@
 // RUN: mlir-opt %s -allow-unregistered-dialect -linalg-comprehensive-module-bufferize="allow-return-memref allow-unknown-ops" -split-input-file | FileCheck %s
 
+// Test bufferization using memref types that have no layout map.
+// RUN: mlir-opt %s -allow-unregistered-dialect -linalg-comprehensive-module-bufferize="allow-return-memref allow-unknown-ops fully-dynamic-layout-maps=0" -split-input-file | FileCheck %s --check-prefix=CHECK-NO-LAYOUT-MAP
+
 // Run fuzzer with different seeds.
 // RUN: mlir-opt %s -allow-unregistered-dialect -linalg-comprehensive-module-bufferize="allow-return-memref test-analysis-only analysis-fuzzer-seed=23" -split-input-file -o /dev/null
 // RUN: mlir-opt %s -allow-unregistered-dialect -linalg-comprehensive-module-bufferize="allow-return-memref test-analysis-only analysis-fuzzer-seed=59" -split-input-file -o /dev/null
@@ -8,20 +11,28 @@
 // RUN: mlir-opt %s -allow-unregistered-dialect -test-comprehensive-function-bufferize="dialect-filter=tensor allow-unknown-ops allow-return-memref" -canonicalize -split-input-file | FileCheck %s --check-prefix=CHECK-TENSOR
 // RUN: mlir-opt %s -allow-unregistered-dialect -test-comprehensive-function-bufferize="dialect-filter=scf allow-unknown-ops allow-return-memref" -canonicalize -split-input-file | FileCheck %s --check-prefix=CHECK-SCF
 
+// CHECK: #[[$MAP:.*]] = affine_map<(d0)[s0, s1] -> (d0 * s1 + s0)>
+
 // CHECK-LABEL: func @use_of_unknown_op_1(
-// CHECK-SAME: %[[m1:.*]]: memref<?xf32
+// CHECK-SAME: %[[m1:.*]]: memref<?xf32, #[[$MAP]]>
+// CHECK-NO-LAYOUT-MAP-LABEL: func @use_of_unknown_op_1(
+// CHECK-NO-LAYOUT-MAP-SAME: %[[m1:.*]]: memref<?xf32>)
 func @use_of_unknown_op_1(%t1: tensor<?xf32> {linalg.inplaceable = true})
     -> vector<5xf32> {
   // ToTensorOp is generated because the function is bufferized and has a
   // memref block argument.
-  // CHECK: %[[m1_tensor:.*]] = bufferization.to_tensor %[[m1]]
+  // CHECK: %[[m1_tensor:.*]] = bufferization.to_tensor %[[m1]] : memref<?xf32, #[[$MAP]]>
   // CHECK: %[[dummy:.*]] = "test.dummy_op"(%[[m1_tensor]])
+  // CHECK-NO-LAYOUT-MAP: %[[m1_tensor:.*]] = bufferization.to_tensor %[[m1]] : memref<?xf32>
+  // CHECK-NO-LAYOUT-MAP: %[[dummy:.*]] = "test.dummy_op"(%[[m1_tensor]])
   %0 = "test.dummy_op"(%t1) : (tensor<?xf32>) -> tensor<?xf32>
 
   %idx = arith.constant 0 : index
   %cst = arith.constant 0.0 : f32
-  // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]]
-  // CHECK: vector.transfer_read %[[dummy_memref]]
+  // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]] : memref<?xf32, #[[$MAP]]>
+  // CHECK: vector.transfer_read %[[dummy_memref]][%{{.*}}], %{{.*}} : memref<?xf32, #[[$MAP]]>
+  // CHECK-NO-LAYOUT-MAP: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]] : memref<?xf32>
+  // CHECK-NO-LAYOUT-MAP: vector.transfer_read %[[dummy_memref]][%{{.*}}], %{{.*}} : memref<?xf32>
   %1 = vector.transfer_read %0[%idx], %cst : tensor<?xf32>, vector<5xf32>
   return %1 : vector<5xf32>
 }
@@ -5,7 +5,11 @@
 // RUN: mlir-opt %s -linalg-comprehensive-module-bufferize="allow-return-memref test-analysis-only analysis-fuzzer-seed=59" -split-input-file -o /dev/null
 // RUN: mlir-opt %s -linalg-comprehensive-module-bufferize="allow-return-memref test-analysis-only analysis-fuzzer-seed=91" -split-input-file -o /dev/null
 
+// Test bufferization using memref types that have no layout map.
+// RUN: mlir-opt %s -linalg-comprehensive-module-bufferize="allow-return-memref fully-dynamic-layout-maps=0" -split-input-file | FileCheck %s --check-prefix=CHECK-NO-LAYOUT-MAP
+
 // CHECK-LABEL: func @transfer_read(%{{.*}}: memref<?xf32, #map>) -> vector<4xf32> {
+// CHECK-NO-LAYOUT-MAP-LABEL: func @transfer_read(%{{.*}}: memref<?xf32>) -> vector<4xf32>
 func @transfer_read(
     %A : tensor<?xf32> {linalg.inplaceable = false})
   -> (vector<4xf32>)
@@ -26,6 +30,7 @@ func @transfer_read(
 
 // CHECK-LABEL: func @fill_inplace(
 // CHECK-SAME: %[[A:[a-zA-Z0-9]*]]: memref<?xf32, #[[$map_1d_dyn]]>
+// CHECK-NO-LAYOUT-MAP-LABEL: func @fill_inplace(%{{.*}}: memref<?xf32>) {
 func @fill_inplace(
     %A : tensor<?xf32> {linalg.inplaceable = true})
   -> tensor<?xf32>
@@ -63,6 +68,7 @@ func @tensor_extract(%A : tensor<?xf32> {linalg.inplaceable = false}) -> (f32) {
 /// No linalg.inplaceable flag, must allocate.
 // CHECK-LABEL: func @not_inplace(
 // CHECK-SAME: %[[A:[a-zA-Z0-9]*]]: memref<?xf32, #[[$map_1d_dyn]]>) -> memref<?xf32> {
+// CHECK-NO-LAYOUT-MAP-LABEL: func @not_inplace(%{{.*}}: memref<?xf32>) -> memref<?xf32>
 func @not_inplace(
     %A : tensor<?xf32> {linalg.inplaceable = false})
   -> tensor<?xf32>
@@ -86,6 +92,7 @@ func @not_inplace(
 
 // CHECK-LABEL: func @not_inplace
 // CHECK-SAME: %[[A:[a-zA-Z0-9]*]]: memref<?x?xf32, #[[$map_2d_dyn]]>) {
+// CHECK-NO-LAYOUT-MAP-LABEL: func @not_inplace(%{{.*}}: memref<?x?xf32>) {
 func @not_inplace(
     %A : tensor<?x?xf32> {linalg.inplaceable = true})
   -> tensor<?x?xf32>
@@ -91,6 +91,10 @@ struct TestComprehensiveFunctionBufferize
       *this, "dialect-filter",
       llvm::cl::desc("Bufferize only ops from the specified dialects"),
       llvm::cl::ZeroOrMore, llvm::cl::MiscFlags::CommaSeparated};
+  Option<bool> fullyDynamicLayoutMaps{
+      *this, "fully-dynamic-layout-maps",
+      llvm::cl::desc("Use fully dynamic layout maps on memref types"),
+      llvm::cl::init(true)};
   Option<bool> createDeallocs{
       *this, "create-deallocs",
       llvm::cl::desc("Specify if buffers should be deallocated"),
@@ -108,6 +112,7 @@ void TestComprehensiveFunctionBufferize::runOnOperation() {
   options->allowUnknownOps = allowUnknownOps;
   options->testAnalysisOnly = testAnalysisOnly;
   options->analysisFuzzerSeed = analysisFuzzerSeed;
+  options->fullyDynamicLayoutMaps = fullyDynamicLayoutMaps;
   options->createDeallocs = createDeallocs;
 
   if (dialectFilter.hasValue()) {
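The equivalent knob on the test pass would be exercised like this (a hypothetical invocation; the flag name comes from the Option registration above):

// RUN: mlir-opt %s -test-comprehensive-function-bufferize="allow-unknown-ops fully-dynamic-layout-maps=0" -split-input-file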