forked from OSchip/llvm-project
[mlir] Update flipped accessors (NFC)
Follow-up with memref flipped, flipping any intermediate changes made in the meantime.
parent 08d651d7ba
commit 04235d07ad
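The "flip" here is the ODS accessor-prefix migration: generated accessors move from raw names to get/set-prefixed ones. A minimal sketch of the rename pattern that repeats throughout the diff below (a hypothetical op with a `source` operand and a `map` attribute; illustrative only, not code from this commit):

    // Before the flip: raw ODS-generated accessor names.
    Value v = op.source();                 // operand getter
    op.sourceMutable().assign(newValue);   // mutable operand accessor
    AffineMap m = op.map();                // attribute getter
    op.mapAttr(AffineMapAttr::get(m));     // attribute "setter" had no prefix

    // After the flip: get/set-prefixed accessors.
    Value v2 = op.getSource();
    op.getSourceMutable().assign(newValue);
    AffineMap m2 = op.getMap();
    op.setMapAttr(AffineMapAttr::get(m2)); // setters gain an explicit set prefix

At the time, dialects opted into the new names through ODS's accessor-prefix setting (raw/prefixed/both during the transition), if memory of the migration serves.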
@@ -65,7 +65,7 @@ public:
     if (!resultType)
       return failure();
     auto newOp =
-        rewriter.create<OpType>(op.getLoc(), resultType, op.source(),
+        rewriter.create<OpType>(op.getLoc(), resultType, op.getSource(),
                                 mixedOffsets, mixedSizes, mixedStrides);
     CastOpFunc func;
     func(rewriter, op, newOp);

@@ -86,9 +86,9 @@ static OpFoldResult foldReshapeOp(ReshapeOpTy reshapeOp,
   // Fold producer-consumer reshape ops that where the operand type of the
   // producer is same as the return type of the consumer.
   auto reshapeSrcOp =
-      reshapeOp.src().template getDefiningOp<InverseReshapeOpTy>();
+      reshapeOp.getSrc().template getDefiningOp<InverseReshapeOpTy>();
   if (reshapeSrcOp && reshapeSrcOp.getSrcType() == reshapeOp.getResultType())
-    return reshapeSrcOp.src();
+    return reshapeSrcOp.getSrc();
   // Reshape of a constant can be replaced with a new constant.
   if (auto elements = operands.front().dyn_cast_or_null<DenseElementsAttr>()) {
     return elements.reshape(

@@ -122,10 +122,10 @@ static LogicalResult verifyReshapeLikeTypes(Op op, T expandedType,
                          "extent dimensions to zero-rank tensor/memref");
     return success();
   }
-  if (collapsedRank != op.reassociation().size())
+  if (collapsedRank != op.getReassociation().size())
     return op.emitOpError("expected rank of the collapsed type(")
            << collapsedRank << ") to be the number of reassociation maps("
-           << op.reassociation().size() << ")";
+           << op.getReassociation().size() << ")";
   auto maps = op.getReassociationMaps();
   for (auto it : llvm::enumerate(maps))
     if (it.value().getNumDims() != expandedRank)

@@ -172,15 +172,16 @@ struct ComposeReassociativeReshapeOps : public OpRewritePattern<ReshapeOpTy> {
   using OpRewritePattern<ReshapeOpTy>::OpRewritePattern;
   LogicalResult matchAndRewrite(ReshapeOpTy reshapeOp,
                                 PatternRewriter &rewriter) const override {
-    auto srcReshapeOp = reshapeOp.src().template getDefiningOp<ReshapeOpTy>();
+    auto srcReshapeOp =
+        reshapeOp.getSrc().template getDefiningOp<ReshapeOpTy>();
     if (!srcReshapeOp)
       return failure();

     ShapedType resultType = reshapeOp.getResultType();

-    if (hasNonIdentityLayout(srcReshapeOp.src().getType()) ||
-        hasNonIdentityLayout(reshapeOp.src().getType()) ||
-        hasNonIdentityLayout(reshapeOp.result().getType()))
+    if (hasNonIdentityLayout(srcReshapeOp.getSrc().getType()) ||
+        hasNonIdentityLayout(reshapeOp.getSrc().getType()) ||
+        hasNonIdentityLayout(reshapeOp.getResult().getType()))
       return failure();

     Optional<SmallVector<ReassociationIndices>> reassociationIndices =

@@ -190,7 +191,7 @@ struct ComposeReassociativeReshapeOps : public OpRewritePattern<ReshapeOpTy> {
     if (!reassociationIndices)
       return failure();
     rewriter.replaceOpWithNewOp<ReshapeOpTy>(
-        reshapeOp, resultType, srcReshapeOp.src(), *reassociationIndices);
+        reshapeOp, resultType, srcReshapeOp.getSrc(), *reassociationIndices);
     return success();
   }
 };

@@ -228,16 +229,16 @@ struct ComposeCollapseOfExpandOp : public OpRewritePattern<CollapseOpTy> {
   using OpRewritePattern<CollapseOpTy>::OpRewritePattern;
   LogicalResult matchAndRewrite(CollapseOpTy collapseOp,
                                 PatternRewriter &rewriter) const override {
-    auto expandOp = collapseOp.src().template getDefiningOp<ExpandOpTy>();
+    auto expandOp = collapseOp.getSrc().template getDefiningOp<ExpandOpTy>();
     if (!expandOp)
       return failure();

     ShapedType srcType = expandOp.getSrcType();
     ShapedType resultType = collapseOp.getResultType();

-    if (hasNonIdentityLayout(collapseOp.src().getType()) ||
-        hasNonIdentityLayout(expandOp.src().getType()) ||
-        hasNonIdentityLayout(expandOp.result().getType()))
+    if (hasNonIdentityLayout(collapseOp.getSrc().getType()) ||
+        hasNonIdentityLayout(expandOp.getSrc().getType()) ||
+        hasNonIdentityLayout(expandOp.getResult().getType()))
       return failure();

     int64_t srcRank = srcType.getRank();

@@ -274,10 +275,10 @@ struct ComposeCollapseOfExpandOp : public OpRewritePattern<CollapseOpTy> {
     }
     if (isResultCollapsed)
       rewriter.replaceOpWithNewOp<CollapseOpTy>(
-          collapseOp, resultType, expandOp.src(), composedReassociation);
+          collapseOp, resultType, expandOp.getSrc(), composedReassociation);
     else
       rewriter.replaceOpWithNewOp<ExpandOpTy>(
-          collapseOp, resultType, expandOp.src(), composedReassociation);
+          collapseOp, resultType, expandOp.getSrc(), composedReassociation);
     return success();
   }
 };

@@ -287,16 +288,16 @@ struct ComposeExpandOfCollapseOp : public OpRewritePattern<ExpandOpTy> {
   using OpRewritePattern<ExpandOpTy>::OpRewritePattern;
   LogicalResult matchAndRewrite(ExpandOpTy expandOp,
                                 PatternRewriter &rewriter) const override {
-    auto collapseOp = expandOp.src().template getDefiningOp<CollapseOpTy>();
+    auto collapseOp = expandOp.getSrc().template getDefiningOp<CollapseOpTy>();
     if (!collapseOp)
       return failure();

     ShapedType srcType = collapseOp.getSrcType();
     ShapedType resultType = expandOp.getResultType();

-    if (hasNonIdentityLayout(expandOp.src().getType()) ||
-        hasNonIdentityLayout(collapseOp.src().getType()) ||
-        hasNonIdentityLayout(collapseOp.result().getType()))
+    if (hasNonIdentityLayout(expandOp.getSrc().getType()) ||
+        hasNonIdentityLayout(collapseOp.getSrc().getType()) ||
+        hasNonIdentityLayout(collapseOp.getResult().getType()))
       return failure();

     int64_t srcRank = srcType.getRank();

@@ -314,7 +315,7 @@ struct ComposeExpandOfCollapseOp : public OpRewritePattern<ExpandOpTy> {
        return failure();

      rewriter.replaceOpWithNewOp<CollapseOpTy>(
-         expandOp, resultType, collapseOp.src(), *composedReassociation);
+         expandOp, resultType, collapseOp.getSrc(), *composedReassociation);
      return success();
    }
    auto composedReassociation =

@@ -324,7 +325,7 @@ struct ComposeExpandOfCollapseOp : public OpRewritePattern<ExpandOpTy> {
       return failure();

     rewriter.replaceOpWithNewOp<ExpandOpTy>(
-        expandOp, resultType, collapseOp.src(), *composedReassociation);
+        expandOp, resultType, collapseOp.getSrc(), *composedReassociation);
     return success();
   }


@@ -301,7 +301,7 @@ bool mlir::isValidDim(Value value, Region *region) {
   if (auto dimOp = dyn_cast<memref::DimOp>(op))
     return isTopLevelValue(dimOp.source());
   if (auto dimOp = dyn_cast<tensor::DimOp>(op))
-    return isTopLevelValue(dimOp.source());
+    return isTopLevelValue(dimOp.getSource());
   return false;
 }

@@ -325,12 +325,12 @@ static bool isMemRefSizeValidSymbol(AnyMemRefDefOp memrefDefOp, unsigned index,
 template <typename OpTy>
 static bool isDimOpValidSymbol(OpTy dimOp, Region *region) {
   // The dim op is okay if its source is defined at the top level.
-  if (isTopLevelValue(dimOp.source()))
+  if (isTopLevelValue(dimOp.getSource()))
     return true;

   // Conservatively handle remaining BlockArguments as non-valid symbols.
   // E.g. scf.for iterArgs.
-  if (dimOp.source().template isa<BlockArgument>())
+  if (dimOp.getSource().template isa<BlockArgument>())
     return false;

   // The dim op is also okay if its operand memref is a view/subview whose

@@ -339,7 +339,7 @@ static bool isDimOpValidSymbol(OpTy dimOp, Region *region) {
   assert(index.hasValue() &&
          "expect only `dim` operations with a constant index");
   int64_t i = index.getValue();
-  return TypeSwitch<Operation *, bool>(dimOp.source().getDefiningOp())
+  return TypeSwitch<Operation *, bool>(dimOp.getSource().getDefiningOp())
       .Case<memref::ViewOp, memref::SubViewOp, memref::AllocOp>(
           [&](auto op) { return isMemRefSizeValidSymbol(op, i, region); })
       .Default([](Operation *) { return false; });

@@ -512,7 +512,7 @@ ParseResult AffineApplyOp::parse(OpAsmParser &parser, OperationState &result) {
 }

 void AffineApplyOp::print(OpAsmPrinter &p) {
-  p << " " << mapAttr();
+  p << " " << getMapAttr();
   printDimAndSymbolList(operand_begin(), operand_end(),
                         getAffineMap().getNumDims(), p);
   p.printOptionalAttrDict((*this)->getAttrs(), /*elidedAttrs=*/{"map"});

@@ -520,7 +520,7 @@ void AffineApplyOp::print(OpAsmPrinter &p) {

 LogicalResult AffineApplyOp::verify() {
   // Check input and output dimensions match.
-  AffineMap affineMap = map();
+  AffineMap affineMap = getMap();

   // Verify that operand count matches affine map dimension and symbol count.
   if (getNumOperands() != affineMap.getNumDims() + affineMap.getNumSymbols())

@@ -930,8 +930,9 @@ void SimplifyAffineOp<AffinePrefetchOp>::replaceAffineOp(
     PatternRewriter &rewriter, AffinePrefetchOp prefetch, AffineMap map,
     ArrayRef<Value> mapOperands) const {
   rewriter.replaceOpWithNewOp<AffinePrefetchOp>(
-      prefetch, prefetch.memref(), map, mapOperands, prefetch.localityHint(),
-      prefetch.isWrite(), prefetch.isDataCache());
+      prefetch, prefetch.getMemref(), map, mapOperands,
+      prefetch.getLocalityHint(), prefetch.getIsWrite(),
+      prefetch.getIsDataCache());
 }
 template <>
 void SimplifyAffineOp<AffineStoreOp>::replaceAffineOp(

@@ -1578,7 +1579,8 @@ void AffineForOp::print(OpAsmPrinter &p) {
   }

   p << ' ';
-  p.printRegion(region(), /*printEntryBlockArgs=*/false, printBlockTerminators);
+  p.printRegion(getRegion(), /*printEntryBlockArgs=*/false,
+                printBlockTerminators);
   p.printOptionalAttrDict((*this)->getAttrs(),
                           /*elidedAttrs=*/{getLowerBoundAttrStrName(),
                                            getUpperBoundAttrStrName(),

@@ -1914,7 +1916,7 @@ bool AffineForOp::matchingBoundOperandList() {
   return true;
 }

-Region &AffineForOp::getLoopBody() { return region(); }
+Region &AffineForOp::getLoopBody() { return getRegion(); }

 Optional<Value> AffineForOp::getSingleInductionVar() {
   return getInductionVar();

@@ -2103,7 +2105,7 @@ struct SimplifyDeadElse : public OpRewritePattern<AffineIfOp> {

   LogicalResult matchAndRewrite(AffineIfOp ifOp,
                                 PatternRewriter &rewriter) const override {
-    if (ifOp.elseRegion().empty() ||
+    if (ifOp.getElseRegion().empty() ||
         !llvm::hasSingleElement(*ifOp.getElseBlock()) || ifOp.getNumResults())
       return failure();

@@ -2250,11 +2252,11 @@ void AffineIfOp::print(OpAsmPrinter &p) {
                         conditionAttr.getValue().getNumDims(), p);
   p.printOptionalArrowTypeList(getResultTypes());
   p << ' ';
-  p.printRegion(thenRegion(), /*printEntryBlockArgs=*/false,
+  p.printRegion(getThenRegion(), /*printEntryBlockArgs=*/false,
                 /*printBlockTerminators=*/getNumResults());

   // Print the 'else' regions if it has any blocks.
-  auto &elseRegion = this->elseRegion();
+  auto &elseRegion = this->getElseRegion();
   if (!elseRegion.empty()) {
     p << " else ";
     p.printRegion(elseRegion,

@@ -2454,7 +2456,7 @@ OpFoldResult AffineLoadOp::fold(ArrayRef<Attribute> cstOperands) {
     return getResult();

   // Fold load from a global constant memref.
-  auto getGlobalOp = memref().getDefiningOp<memref::GetGlobalOp>();
+  auto getGlobalOp = getMemref().getDefiningOp<memref::GetGlobalOp>();
   if (!getGlobalOp)
     return {};
   // Get to the memref.global defining the symbol.

@@ -2577,7 +2579,8 @@ LogicalResult AffineStoreOp::fold(ArrayRef<Attribute> cstOperands,

 template <typename T> static LogicalResult verifyAffineMinMaxOp(T op) {
   // Verify that operand count matches affine map dimension and symbol count.
-  if (op.getNumOperands() != op.map().getNumDims() + op.map().getNumSymbols())
+  if (op.getNumOperands() !=
+      op.getMap().getNumDims() + op.getMap().getNumSymbols())
     return op.emitOpError(
         "operand count and affine map dimension and symbol count must match");
   return success();

@@ -2586,7 +2589,7 @@ template <typename T> static LogicalResult verifyAffineMinMaxOp(T op) {
 template <typename T> static void printAffineMinMaxOp(OpAsmPrinter &p, T op) {
   p << ' ' << op->getAttr(T::getMapAttrStrName());
   auto operands = op.getOperands();
-  unsigned numDims = op.map().getNumDims();
+  unsigned numDims = op.getMap().getNumDims();
   p << '(' << operands.take_front(numDims) << ')';

   if (operands.size() != numDims)

@@ -2627,12 +2630,12 @@ static OpFoldResult foldMinMaxOp(T op, ArrayRef<Attribute> operands) {
   // TODO: Fold more cases:
   // min(some_affine, some_affine + constant, ...), etc.
   SmallVector<int64_t, 2> results;
-  auto foldedMap = op.map().partialConstantFold(operands, &results);
+  auto foldedMap = op.getMap().partialConstantFold(operands, &results);

   // If some of the map results are not constant, try changing the map in-place.
   if (results.empty()) {
     // If the map is the same, report that folding did not happen.
-    if (foldedMap == op.map())
+    if (foldedMap == op.getMap())
       return {};
     op->setAttr("map", AffineMapAttr::get(foldedMap));
     return op.getResult();

@@ -2850,9 +2853,9 @@ struct CanonicalizeSingleResultAffineMinMaxOp : public OpRewritePattern<T> {

   LogicalResult matchAndRewrite(T affineOp,
                                 PatternRewriter &rewriter) const override {
-    if (affineOp.map().getNumResults() != 1)
+    if (affineOp.getMap().getNumResults() != 1)
       return failure();
-    rewriter.replaceOpWithNewOp<AffineApplyOp>(affineOp, affineOp.map(),
+    rewriter.replaceOpWithNewOp<AffineApplyOp>(affineOp, affineOp.getMap(),
                                                affineOp.getOperands());
     return success();
   }

@@ -2971,14 +2974,14 @@ ParseResult AffinePrefetchOp::parse(OpAsmParser &parser,
 }

 void AffinePrefetchOp::print(OpAsmPrinter &p) {
-  p << " " << memref() << '[';
+  p << " " << getMemref() << '[';
   AffineMapAttr mapAttr =
       (*this)->getAttrOfType<AffineMapAttr>(getMapAttrStrName());
   if (mapAttr)
     p.printAffineMapOfSSAIds(mapAttr, getMapOperands());
-  p << ']' << ", " << (isWrite() ? "write" : "read") << ", "
-    << "locality<" << localityHint() << ">, "
-    << (isDataCache() ? "data" : "instr");
+  p << ']' << ", " << (getIsWrite() ? "write" : "read") << ", "
+    << "locality<" << getLocalityHint() << ">, "
+    << (getIsDataCache() ? "data" : "instr");
   p.printOptionalAttrDict(
       (*this)->getAttrs(),
       /*elidedAttrs=*/{getMapAttrStrName(), getLocalityHintAttrStrName(),

@@ -3118,40 +3121,40 @@ void AffineParallelOp::build(OpBuilder &builder, OperationState &result,
   ensureTerminator(*bodyRegion, builder, result.location);
 }

-Region &AffineParallelOp::getLoopBody() { return region(); }
+Region &AffineParallelOp::getLoopBody() { return getRegion(); }

-unsigned AffineParallelOp::getNumDims() { return steps().size(); }
+unsigned AffineParallelOp::getNumDims() { return getSteps().size(); }

 AffineParallelOp::operand_range AffineParallelOp::getLowerBoundsOperands() {
-  return getOperands().take_front(lowerBoundsMap().getNumInputs());
+  return getOperands().take_front(getLowerBoundsMap().getNumInputs());
 }

 AffineParallelOp::operand_range AffineParallelOp::getUpperBoundsOperands() {
-  return getOperands().drop_front(lowerBoundsMap().getNumInputs());
+  return getOperands().drop_front(getLowerBoundsMap().getNumInputs());
 }

 AffineMap AffineParallelOp::getLowerBoundMap(unsigned pos) {
-  auto values = lowerBoundsGroups().getValues<int32_t>();
+  auto values = getLowerBoundsGroups().getValues<int32_t>();
   unsigned start = 0;
   for (unsigned i = 0; i < pos; ++i)
     start += values[i];
-  return lowerBoundsMap().getSliceMap(start, values[pos]);
+  return getLowerBoundsMap().getSliceMap(start, values[pos]);
 }

 AffineMap AffineParallelOp::getUpperBoundMap(unsigned pos) {
-  auto values = upperBoundsGroups().getValues<int32_t>();
+  auto values = getUpperBoundsGroups().getValues<int32_t>();
   unsigned start = 0;
   for (unsigned i = 0; i < pos; ++i)
     start += values[i];
-  return upperBoundsMap().getSliceMap(start, values[pos]);
+  return getUpperBoundsMap().getSliceMap(start, values[pos]);
 }

 AffineValueMap AffineParallelOp::getLowerBoundsValueMap() {
-  return AffineValueMap(lowerBoundsMap(), getLowerBoundsOperands());
+  return AffineValueMap(getLowerBoundsMap(), getLowerBoundsOperands());
 }

 AffineValueMap AffineParallelOp::getUpperBoundsValueMap() {
-  return AffineValueMap(upperBoundsMap(), getUpperBoundsOperands());
+  return AffineValueMap(getUpperBoundsMap(), getUpperBoundsOperands());
 }

 Optional<SmallVector<int64_t, 8>> AffineParallelOp::getConstantRanges() {

@@ -3174,7 +3177,7 @@ Optional<SmallVector<int64_t, 8>> AffineParallelOp::getConstantRanges() {
   return out;
 }

-Block *AffineParallelOp::getBody() { return &region().front(); }
+Block *AffineParallelOp::getBody() { return &getRegion().front(); }

 OpBuilder AffineParallelOp::getBodyBuilder() {
   return OpBuilder(getBody(), std::prev(getBody()->end()));

@@ -3190,7 +3193,7 @@ void AffineParallelOp::setLowerBounds(ValueRange lbOperands, AffineMap map) {
   newOperands.append(ubOperands.begin(), ubOperands.end());
   (*this)->setOperands(newOperands);

-  lowerBoundsMapAttr(AffineMapAttr::get(map));
+  setLowerBoundsMapAttr(AffineMapAttr::get(map));
 }

 void AffineParallelOp::setUpperBounds(ValueRange ubOperands, AffineMap map) {

@@ -3201,62 +3204,62 @@ void AffineParallelOp::setUpperBounds(ValueRange ubOperands, AffineMap map) {
   newOperands.append(ubOperands.begin(), ubOperands.end());
   (*this)->setOperands(newOperands);

-  upperBoundsMapAttr(AffineMapAttr::get(map));
+  setUpperBoundsMapAttr(AffineMapAttr::get(map));
 }

 void AffineParallelOp::setLowerBoundsMap(AffineMap map) {
-  AffineMap lbMap = lowerBoundsMap();
+  AffineMap lbMap = getLowerBoundsMap();
   assert(lbMap.getNumDims() == map.getNumDims() &&
          lbMap.getNumSymbols() == map.getNumSymbols());
   (void)lbMap;
-  lowerBoundsMapAttr(AffineMapAttr::get(map));
+  setLowerBoundsMapAttr(AffineMapAttr::get(map));
 }

 void AffineParallelOp::setUpperBoundsMap(AffineMap map) {
-  AffineMap ubMap = upperBoundsMap();
+  AffineMap ubMap = getUpperBoundsMap();
   assert(ubMap.getNumDims() == map.getNumDims() &&
          ubMap.getNumSymbols() == map.getNumSymbols());
   (void)ubMap;
-  upperBoundsMapAttr(AffineMapAttr::get(map));
+  setUpperBoundsMapAttr(AffineMapAttr::get(map));
 }

 void AffineParallelOp::setSteps(ArrayRef<int64_t> newSteps) {
-  stepsAttr(getBodyBuilder().getI64ArrayAttr(newSteps));
+  setStepsAttr(getBodyBuilder().getI64ArrayAttr(newSteps));
 }

 LogicalResult AffineParallelOp::verify() {
   auto numDims = getNumDims();
-  if (lowerBoundsGroups().getNumElements() != numDims ||
-      upperBoundsGroups().getNumElements() != numDims ||
-      steps().size() != numDims || getBody()->getNumArguments() != numDims) {
+  if (getLowerBoundsGroups().getNumElements() != numDims ||
+      getUpperBoundsGroups().getNumElements() != numDims ||
+      getSteps().size() != numDims || getBody()->getNumArguments() != numDims) {
     return emitOpError() << "the number of region arguments ("
                          << getBody()->getNumArguments()
                          << ") and the number of map groups for lower ("
-                         << lowerBoundsGroups().getNumElements()
+                         << getLowerBoundsGroups().getNumElements()
                          << ") and upper bound ("
-                         << upperBoundsGroups().getNumElements()
-                         << "), and the number of steps (" << steps().size()
+                         << getUpperBoundsGroups().getNumElements()
+                         << "), and the number of steps (" << getSteps().size()
                          << ") must all match";
   }

   unsigned expectedNumLBResults = 0;
-  for (APInt v : lowerBoundsGroups())
+  for (APInt v : getLowerBoundsGroups())
     expectedNumLBResults += v.getZExtValue();
-  if (expectedNumLBResults != lowerBoundsMap().getNumResults())
+  if (expectedNumLBResults != getLowerBoundsMap().getNumResults())
     return emitOpError() << "expected lower bounds map to have "
                          << expectedNumLBResults << " results";
   unsigned expectedNumUBResults = 0;
-  for (APInt v : upperBoundsGroups())
+  for (APInt v : getUpperBoundsGroups())
     expectedNumUBResults += v.getZExtValue();
-  if (expectedNumUBResults != upperBoundsMap().getNumResults())
+  if (expectedNumUBResults != getUpperBoundsMap().getNumResults())
     return emitOpError() << "expected upper bounds map to have "
                          << expectedNumUBResults << " results";

-  if (reductions().size() != getNumResults())
+  if (getReductions().size() != getNumResults())
     return emitOpError("a reduction must be specified for each output");

   // Verify reduction ops are all valid
-  for (Attribute attr : reductions()) {
+  for (Attribute attr : getReductions()) {
     auto intAttr = attr.dyn_cast<IntegerAttr>();
     if (!intAttr || !arith::symbolizeAtomicRMWKind(intAttr.getInt()))
       return emitOpError("invalid reduction attribute");

@@ -3265,11 +3268,11 @@ LogicalResult AffineParallelOp::verify() {
   // Verify that the bound operands are valid dimension/symbols.
   /// Lower bounds.
   if (failed(verifyDimAndSymbolIdentifiers(*this, getLowerBoundsOperands(),
-                                           lowerBoundsMap().getNumDims())))
+                                           getLowerBoundsMap().getNumDims())))
     return failure();
   /// Upper bounds.
   if (failed(verifyDimAndSymbolIdentifiers(*this, getUpperBoundsOperands(),
-                                           upperBoundsMap().getNumDims())))
+                                           getUpperBoundsMap().getNumDims())))
     return failure();
   return success();
 }

@@ -3342,10 +3345,10 @@ static void printMinMaxBound(OpAsmPrinter &p, AffineMapAttr mapAttr,

 void AffineParallelOp::print(OpAsmPrinter &p) {
   p << " (" << getBody()->getArguments() << ") = (";
-  printMinMaxBound(p, lowerBoundsMapAttr(), lowerBoundsGroupsAttr(),
+  printMinMaxBound(p, getLowerBoundsMapAttr(), getLowerBoundsGroupsAttr(),
                    getLowerBoundsOperands(), "max");
   p << ") to (";
-  printMinMaxBound(p, upperBoundsMapAttr(), upperBoundsGroupsAttr(),
+  printMinMaxBound(p, getUpperBoundsMapAttr(), getUpperBoundsGroupsAttr(),
                    getUpperBoundsOperands(), "min");
   p << ')';
   SmallVector<int64_t, 8> steps = getSteps();

@@ -3357,7 +3360,7 @@ void AffineParallelOp::print(OpAsmPrinter &p) {
   }
   if (getNumResults()) {
     p << " reduce (";
-    llvm::interleaveComma(reductions(), p, [&](auto &attr) {
+    llvm::interleaveComma(getReductions(), p, [&](auto &attr) {
       arith::AtomicRMWKind sym = *arith::symbolizeAtomicRMWKind(
           attr.template cast<IntegerAttr>().getInt());
       p << "\"" << arith::stringifyAtomicRMWKind(sym) << "\"";

@@ -3366,7 +3369,7 @@ void AffineParallelOp::print(OpAsmPrinter &p) {
   }

   p << ' ';
-  p.printRegion(region(), /*printEntryBlockArgs=*/false,
+  p.printRegion(getRegion(), /*printEntryBlockArgs=*/false,
                 /*printBlockTerminators=*/getNumResults());
   p.printOptionalAttrDict(
       (*this)->getAttrs(),

@@ -181,12 +181,12 @@ bool checkInvarianceOfNestedIfOps(Operation *op, Value indVar,
   assert(isa<AffineIfOp>(op));
   auto ifOp = cast<AffineIfOp>(op);

-  if (!areAllOpsInTheBlockListInvariant(ifOp.thenRegion(), indVar, iterArgs,
+  if (!areAllOpsInTheBlockListInvariant(ifOp.getThenRegion(), indVar, iterArgs,
                                         opsWithUsers, opsToHoist)) {
     return false;
   }

-  if (!areAllOpsInTheBlockListInvariant(ifOp.elseRegion(), indVar, iterArgs,
+  if (!areAllOpsInTheBlockListInvariant(ifOp.getElseRegion(), indVar, iterArgs,
                                         opsWithUsers, opsToHoist)) {
     return false;
   }

@@ -1668,7 +1668,7 @@ stripmineSink(AffineForOp forOp, uint64_t factor,
         newForOp.getBody()->getOperations().begin(),
         t.getBody()->getOperations(), begin, std::next(begin, nOps));
     replaceAllUsesInRegionWith(iv, newForOp.getInductionVar(),
-                               newForOp.region());
+                               newForOp.getRegion());
     innerLoops.push_back(newForOp);
   }

@@ -1813,7 +1813,7 @@ LogicalResult mlir::coalesceLoops(MutableArrayRef<AffineForOp> loops) {
                        applyOperands);
     }
     replaceAllUsesInRegionWith(loops[idx - 1].getInductionVar(),
-                               inductionVariable, loops.back().region());
+                               inductionVariable, loops.back().getRegion());
   }

   // 4. Move the operations from the innermost just above the second-outermost

@@ -370,7 +370,7 @@ mlir::affineParallelize(AffineForOp forOp,
       llvm::makeArrayRef(upperBoundMap), upperBoundOperands,
       llvm::makeArrayRef(forOp.getStep()));
   // Steal the body of the old affine for op.
-  newPloop.region().takeBody(forOp.region());
+  newPloop.getRegion().takeBody(forOp.getRegion());
   Operation *yieldOp = &newPloop.getBody()->back();

   // Handle the initial values of reductions because the parallel loop always

@@ -487,7 +487,7 @@ void mlir::normalizeAffineParallel(AffineParallelOp op) {
   if (op.hasMinMaxBounds())
     return;

-  AffineMap lbMap = op.lowerBoundsMap();
+  AffineMap lbMap = op.getLowerBoundsMap();
   SmallVector<int64_t, 8> steps = op.getSteps();
   // No need to do any work if the parallel op is already normalized.
   bool isAlreadyNormalized =

@@ -346,7 +346,7 @@ struct FoldDimOfAllocTensorOp : public OpRewritePattern<tensor::DimOp> {
   LogicalResult matchAndRewrite(tensor::DimOp dimOp,
                                 PatternRewriter &rewriter) const override {
     Optional<int64_t> maybeConstantIndex = dimOp.getConstantIndex();
-    auto allocTensorOp = dimOp.source().getDefiningOp<AllocTensorOp>();
+    auto allocTensorOp = dimOp.getSource().getDefiningOp<AllocTensorOp>();
     if (!allocTensorOp || !maybeConstantIndex)
       return failure();
     if (!allocTensorOp.getType().isDynamicDim(*maybeConstantIndex))

@@ -558,12 +558,12 @@ struct DimOfToTensorFolder : public OpRewritePattern<tensor::DimOp> {

   LogicalResult matchAndRewrite(tensor::DimOp dimOp,
                                 PatternRewriter &rewriter) const override {
-    auto memrefToTensorOp = dimOp.source().getDefiningOp<ToTensorOp>();
+    auto memrefToTensorOp = dimOp.getSource().getDefiningOp<ToTensorOp>();
     if (!memrefToTensorOp)
       return failure();

     rewriter.replaceOpWithNewOp<memref::DimOp>(
-        dimOp, memrefToTensorOp.getMemref(), dimOp.index());
+        dimOp, memrefToTensorOp.getMemref(), dimOp.getIndex());
     return success();
   }
 };

@@ -202,13 +202,13 @@ mlir::bufferization::insertSliceAnchoredAllocTensorEliminationStep(
           return false;

         // Collect all values that are needed to construct the replacement op.
-        neededValues.append(insertSliceOp.offsets().begin(),
-                            insertSliceOp.offsets().end());
-        neededValues.append(insertSliceOp.sizes().begin(),
-                            insertSliceOp.sizes().end());
-        neededValues.append(insertSliceOp.strides().begin(),
-                            insertSliceOp.strides().end());
-        neededValues.push_back(insertSliceOp.dest());
+        neededValues.append(insertSliceOp.getOffsets().begin(),
+                            insertSliceOp.getOffsets().end());
+        neededValues.append(insertSliceOp.getSizes().begin(),
+                            insertSliceOp.getSizes().end());
+        neededValues.append(insertSliceOp.getStrides().begin(),
+                            insertSliceOp.getStrides().end());
+        neededValues.push_back(insertSliceOp.getDest());

         return true;
       },

@@ -221,20 +221,20 @@ mlir::bufferization::insertSliceAnchoredAllocTensorEliminationStep(
         SmallVector<OpFoldResult> mixedSizes = insertOp.getMixedSizes();
         SmallVector<OpFoldResult> mixedStrides = insertOp.getMixedStrides();
         OffsetSizeAndStrideOpInterface::expandToRank(
-            insertOp.dest(), mixedOffsets, mixedSizes, mixedStrides,
+            insertOp.getDest(), mixedOffsets, mixedSizes, mixedStrides,
             [&](Value target, int64_t dim) -> OpFoldResult {
               auto shapedType = target.getType().cast<ShapedType>();
               if (shapedType.isDynamicDim(dim))
-                return b.create<tensor::DimOp>(loc, target, dim).result();
+                return b.create<tensor::DimOp>(loc, target, dim).getResult();
               return b.getIndexAttr(shapedType.getDimSize(dim));
             });
         auto t = tensor::ExtractSliceOp::inferRankReducedResultType(
             insertOp.getSourceType().getRank(),
-            insertOp.dest().getType().cast<RankedTensorType>(), mixedOffsets,
+            insertOp.getDest().getType().cast<RankedTensorType>(), mixedOffsets,
             mixedSizes, mixedStrides);
         auto extractOp = b.create<tensor::ExtractSliceOp>(
-            loc, t, insertOp.dest(), mixedOffsets, mixedSizes, mixedStrides);
-        return extractOp.result();
+            loc, t, insertOp.getDest(), mixedOffsets, mixedSizes, mixedStrides);
+        return extractOp.getResult();
       });
 }

@@ -49,7 +49,7 @@ Operation *EmitCDialect::materializeConstant(OpBuilder &builder,
 //===----------------------------------------------------------------------===//

 LogicalResult ApplyOp::verify() {
-  StringRef applicableOperatorStr = applicableOperator();
+  StringRef applicableOperatorStr = getApplicableOperator();

   // Applicable operator must not be empty.
   if (applicableOperatorStr.empty())

@@ -81,10 +81,10 @@ bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {

 LogicalResult emitc::CallOp::verify() {
   // Callee must not be empty.
-  if (callee().empty())
+  if (getCallee().empty())
     return emitOpError("callee must not be empty");

-  if (Optional<ArrayAttr> argsAttr = args()) {
+  if (Optional<ArrayAttr> argsAttr = getArgs()) {
     for (Attribute arg : *argsAttr) {
       if (arg.getType().isa<IndexType>()) {
         int64_t index = arg.cast<IntegerAttr>().getInt();

@@ -100,7 +100,7 @@ LogicalResult emitc::CallOp::verify() {
     }
   }

-  if (Optional<ArrayAttr> templateArgsAttr = template_args()) {
+  if (Optional<ArrayAttr> templateArgsAttr = getTemplateArgs()) {
     for (Attribute tArg : *templateArgsAttr) {
       if (!tArg.isa<TypeAttr>() && !tArg.isa<IntegerAttr>() &&
           !tArg.isa<FloatAttr>() && !tArg.isa<emitc::OpaqueAttr>())

@@ -117,7 +117,7 @@ LogicalResult emitc::CallOp::verify() {

 /// The constant op requires that the attribute's type matches the return type.
 LogicalResult emitc::ConstantOp::verify() {
-  Attribute value = valueAttr();
+  Attribute value = getValueAttr();
   Type type = getType();
   if (!value.getType().isa<NoneType>() && type != value.getType())
     return emitOpError() << "requires attribute's type (" << value.getType()

@@ -127,7 +127,7 @@ LogicalResult emitc::ConstantOp::verify() {

 OpFoldResult emitc::ConstantOp::fold(ArrayRef<Attribute> operands) {
   assert(operands.empty() && "constant has no operands");
-  return value();
+  return getValue();
 }

 //===----------------------------------------------------------------------===//

@@ -135,12 +135,12 @@ OpFoldResult emitc::ConstantOp::fold(ArrayRef<Attribute> operands) {
 //===----------------------------------------------------------------------===//

 void IncludeOp::print(OpAsmPrinter &p) {
-  bool standardInclude = is_standard_include();
+  bool standardInclude = getIsStandardInclude();

   p << " ";
   if (standardInclude)
     p << "<";
-  p << "\"" << include() << "\"";
+  p << "\"" << getInclude() << "\"";
   if (standardInclude)
     p << ">";
 }

@@ -171,7 +171,7 @@ ParseResult IncludeOp::parse(OpAsmParser &parser, OperationState &result) {

 /// The variable op requires that the attribute's type matches the return type.
 LogicalResult emitc::VariableOp::verify() {
-  Attribute value = valueAttr();
+  Attribute value = getValueAttr();
   Type type = getType();
   if (!value.getType().isa<NoneType>() && type != value.getType())
     return emitOpError() << "requires attribute's type (" << value.getType()

@@ -491,14 +491,14 @@ struct FoldFillWithTensorReshape : OpRewritePattern<TensorReshapeOp> {
   using OpRewritePattern<TensorReshapeOp>::OpRewritePattern;
   LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
                                 PatternRewriter &rewriter) const override {
-    auto oldFill = reshapeOp.src().template getDefiningOp<FillOp>();
+    auto oldFill = reshapeOp.getSrc().template getDefiningOp<FillOp>();
     if (!oldFill)
       return failure();

     Location loc = oldFill.getLoc();
     auto newInit = rewriter.create<TensorReshapeOp>(
         loc, reshapeOp.getResultType(), oldFill.output(),
-        reshapeOp.reassociation());
+        reshapeOp.getReassociation());
     rewriter.replaceOpWithNewOp<FillOp>(reshapeOp, ValueRange{oldFill.value()},
                                         ValueRange{newInit});

@@ -513,7 +513,7 @@ struct FoldFillWithPad final : public OpRewritePattern<tensor::PadOp> {

   LogicalResult matchAndRewrite(tensor::PadOp padOp,
                                 PatternRewriter &rewriter) const override {
-    auto fillOp = padOp.source().getDefiningOp<linalg::FillOp>();
+    auto fillOp = padOp.getSource().getDefiningOp<linalg::FillOp>();
     if (!fillOp)
       return failure();

@@ -553,7 +553,7 @@ struct FoldInsertPadIntoFill : public OpRewritePattern<tensor::InsertSliceOp> {

   LogicalResult matchAndRewrite(tensor::InsertSliceOp insertOp,
                                 PatternRewriter &rewriter) const override {
-    auto srcPadOp = insertOp.source().getDefiningOp<tensor::PadOp>();
+    auto srcPadOp = insertOp.getSource().getDefiningOp<tensor::PadOp>();
     if (!srcPadOp)
       return failure();

@@ -562,7 +562,7 @@ struct FoldInsertPadIntoFill : public OpRewritePattern<tensor::InsertSliceOp> {

     // Walk back the tensor.insert_slice chain and find the first destination
     // value at the start of the chain.
-    Value firstDest = insertOp.dest();
+    Value firstDest = insertOp.getDest();
     while (auto prevOp = firstDest.getDefiningOp<tensor::InsertSliceOp>()) {
       if (prevOp.getType().getRank() != prevOp.getSourceType().getRank())
         return failure();

@@ -593,7 +593,7 @@ struct FoldInsertPadIntoFill : public OpRewritePattern<tensor::InsertSliceOp> {

       if (!disjoint)
         break;
-      firstDest = prevOp.dest();
+      firstDest = prevOp.getDest();
     }

     // Check whether the first destination is a fill op. For overlapped cases,

@@ -633,12 +633,13 @@ struct FoldInsertPadIntoFill : public OpRewritePattern<tensor::InsertSliceOp> {
     SmallVector<OpFoldResult, 4> newSizes;
     for (int i = 0, e = srcPadOp.getSourceType().getRank(); i < e; ++i) {
       newSizes.push_back(
-          rewriter.create<tensor::DimOp>(loc, srcPadOp.source(), i).result());
+          rewriter.create<tensor::DimOp>(loc, srcPadOp.getSource(), i)
+              .getResult());
     }

     rewriter.replaceOpWithNewOp<tensor::InsertSliceOp>(
-        insertOp, srcPadOp.source(), insertOp.dest(), newOffsets, newSizes,
-        insertOp.getMixedStrides());
+        insertOp, srcPadOp.getSource(), insertOp.getDest(), newOffsets,
+        newSizes, insertOp.getMixedStrides());
     return success();
   }
 };

@@ -1216,13 +1217,13 @@ struct FoldInitTensorWithExtractSliceOp

   LogicalResult matchAndRewrite(tensor::ExtractSliceOp sliceOp,
                                 PatternRewriter &rewriter) const override {
-    if (!sliceOp.source().getDefiningOp<linalg::InitTensorOp>())
+    if (!sliceOp.getSource().getDefiningOp<linalg::InitTensorOp>())
      return failure();
     // ExtractSliceOp may be rank-reducing; its dynamic sizes must be preserved
     // as well as its result type.
     rewriter.replaceOpWithNewOp<linalg::InitTensorOp>(
-        sliceOp, sliceOp.sizes(),
-        sliceOp.result().getType().cast<RankedTensorType>().getShape(),
+        sliceOp, sliceOp.getSizes(),
+        sliceOp.getResult().getType().cast<RankedTensorType>().getShape(),
         sliceOp.getSourceType().getElementType());
     return success();
   }

@@ -1235,7 +1236,7 @@ struct FoldInitTensorWithTensorReshapeOp

   LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
                                 PatternRewriter &rewriter) const override {
-    if (!reshapeOp.src().template getDefiningOp<InitTensorOp>())
+    if (!reshapeOp.getSrc().template getDefiningOp<InitTensorOp>())
       return failure();
     Location loc = reshapeOp.getLoc();
     ReifiedRankedShapedTypeDims resultShapes;

@@ -1264,7 +1265,7 @@ struct FoldInitTensorWithDimOp : public OpRewritePattern<tensor::DimOp> {
   LogicalResult matchAndRewrite(tensor::DimOp dimOp,
                                 PatternRewriter &rewriter) const override {
     Optional<int64_t> maybeConstantIndex = dimOp.getConstantIndex();
-    auto initTensorOp = dimOp.source().getDefiningOp<linalg::InitTensorOp>();
+    auto initTensorOp = dimOp.getSource().getDefiningOp<linalg::InitTensorOp>();
     if (!initTensorOp || !maybeConstantIndex)
       return failure();
     if (!initTensorOp.isDynamicSize(*maybeConstantIndex))

@@ -1299,7 +1300,7 @@ struct FoldInitTensorWithTensorCastOp
                                 PatternRewriter &rewriter) const override {
     if (!canFoldIntoProducerOp(castOp))
       return failure();
-    auto producer = castOp.source().getDefiningOp<InitTensorOp>();
+    auto producer = castOp.getSource().getDefiningOp<InitTensorOp>();
     if (!producer)
       return failure();

@@ -1581,7 +1582,7 @@ struct FoldTensorCastProducerOp : public OpInterfaceRewritePattern<LinalgOp> {
     for (OpOperand *opOperand : op.getInputOperands()) {
       auto tensorCastOp = opOperand->get().getDefiningOp<tensor::CastOp>();
       newOperands.push_back(canFoldIntoConsumerOp(tensorCastOp)
-                                ? tensorCastOp.source()
+                                ? tensorCastOp.getSource()
                                 : opOperand->get());
     }
     // Init tensors may fold, in which case the resultType must also change.

@@ -1622,7 +1623,7 @@ struct FoldTensorCastConsumerOp : public OpRewritePattern<tensor::CastOp> {
                                 PatternRewriter &rewriter) const override {
     if (!tensor::canFoldIntoProducerOp(castOp))
       return failure();
-    auto linalgOp = castOp.source().getDefiningOp<LinalgOp>();
+    auto linalgOp = castOp.getSource().getDefiningOp<LinalgOp>();
     if (!linalgOp)
       return failure();

@@ -1630,7 +1631,7 @@ struct FoldTensorCastConsumerOp : public OpRewritePattern<tensor::CastOp> {
     rewriter.setInsertionPoint(linalgOp);

     Location loc = linalgOp.getLoc();
-    OpResult resultValue = castOp.source().cast<OpResult>();
+    OpResult resultValue = castOp.getSource().cast<OpResult>();
     unsigned resultNumber = resultValue.getResultNumber();
     auto resultType = castOp->getResult(0).getType().cast<RankedTensorType>();
     // Replace the `outs` for the result with a `tensor.cast`. This cast is now

@@ -1681,7 +1682,7 @@ static void populateMap(LinalgOp linalgOp, ArrayRef<OpOperand *> operands,
     ArrayRef<int64_t> sourceShape = sourceType.getShape();
     if (parentOp) {
       if (auto castOp = dyn_cast<tensor::CastOp>(parentOp)) {
-        Value castSource = castOp.source();
+        Value castSource = castOp.getSource();
         auto castSourceType = castSource.getType().cast<RankedTensorType>();
         if (castSourceType.hasStaticShape())
           sourceShape = castSourceType.getShape();

@@ -49,7 +49,7 @@ struct BubbleUpExtractSliceOpPattern

   LogicalResult matchAndRewrite(tensor::ExtractSliceOp sliceOp,
                                 PatternRewriter &rewriter) const final {
-    Value source = sliceOp.source();
+    Value source = sliceOp.getSource();
     auto linalgOp = source.getDefiningOp<LinalgOp>();
     if (!linalgOp) {
       return rewriter.notifyMatchFailure(sliceOp,

@@ -506,7 +506,7 @@ struct UseRankReducedExtractSliceOp

     Location loc = sliceOp.getLoc();
     Value newSlice = rewriter.create<tensor::ExtractSliceOp>(
-        loc, rankReducedType, sliceOp.source(), offsets, sizes, strides);
+        loc, rankReducedType, sliceOp.getSource(), offsets, sizes, strides);
     rewriter.replaceOpWithNewOp<tensor::ExpandShapeOp>(
         sliceOp, resultType, newSlice, *reassociation);
     return success();

@@ -530,10 +530,11 @@ struct UseRankReducedInsertSliceOp
       return failure();
     Location loc = insertOp.getLoc();
     auto reshapedSource = rewriter.create<tensor::CollapseShapeOp>(
-        loc, insertOp.source(), *reassociation);
+        loc, insertOp.getSource(), *reassociation);
     rewriter.replaceOpWithNewOp<tensor::InsertSliceOp>(
-        insertOp, reshapedSource, insertOp.dest(), insertOp.getMixedOffsets(),
-        insertOp.getMixedSizes(), insertOp.getMixedStrides());
+        insertOp, reshapedSource, insertOp.getDest(),
+        insertOp.getMixedOffsets(), insertOp.getMixedSizes(),
+        insertOp.getMixedStrides());
     return success();
   }
 };

@@ -717,8 +717,8 @@ fuseWithReshapeByExpansion(GenericOp genericOp, Operation *reshapeOp,
   expandedOpOperands.reserve(genericOp.getNumInputs());
   for (OpOperand *opOperand : genericOp.getInputOperands()) {
     if (opOperand == fusableOpOperand) {
-      expandedOpOperands.push_back(isExpanding ? expandingReshapeOp.src()
-                                               : collapsingReshapeOp.src());
+      expandedOpOperands.push_back(isExpanding ? expandingReshapeOp.getSrc()
+                                               : collapsingReshapeOp.getSrc());
       continue;
     }
     if (genericOp.isInputTensor(opOperand)) {

@@ -865,7 +865,7 @@ struct FoldReshapeWithGenericOpByExpansion
   LogicalResult matchAndRewrite(tensor::ExpandShapeOp reshapeOp,
                                 PatternRewriter &rewriter) const override {
     // Fold only if all constraints of fusing with reshape by expansion are met.
-    GenericOp producer = reshapeOp.src().getDefiningOp<GenericOp>();
+    GenericOp producer = reshapeOp.getSrc().getDefiningOp<GenericOp>();
     if (!producer || producer.getNumOutputs() != 1 ||
         !isFusableWithReshapeByDimExpansion(producer,
                                             producer.getOutputOperand(0)) ||

@@ -387,7 +387,7 @@ static void getProducerOfTensor(Value tensor, OpResult &opResult) {
       return;
     }
     if (auto sliceOp = tensor.getDefiningOp<tensor::ExtractSliceOp>()) {
-      tensor = sliceOp.source();
+      tensor = sliceOp.getSource();
       continue;
     }
     if (auto blockArg = tensor.dyn_cast<BlockArgument>()) {

@@ -256,7 +256,7 @@ bool TileLoopNest::hasOtherUses(BlockArgument bbArg,
     }
     if (auto insertSliceOp = dyn_cast<tensor::InsertSliceOp>(op)) {
       SetVector<Operation *> backwardSlice;
-      getBackwardSlice(insertSliceOp.source(), &backwardSlice,
+      getBackwardSlice(insertSliceOp.getSource(), &backwardSlice,
                        [](Operation *op) {
                          return isa<LinalgOp, tensor::InsertSliceOp>(op);
                        });

@@ -358,8 +358,8 @@ FailureOr<LinalgOp> TileLoopNest::fuseProducer(OpBuilder &b,

   // Check if the producer is a LinalgOp possibly passed by iteration argument.
   OpOperand *iterArg = nullptr;
-  auto producerResult = sliceOp.source().dyn_cast<OpResult>();
-  if (auto bbArg = sliceOp.source().dyn_cast<BlockArgument>()) {
+  auto producerResult = sliceOp.getSource().dyn_cast<OpResult>();
+  if (auto bbArg = sliceOp.getSource().dyn_cast<BlockArgument>()) {
     iterArg = getTiedIterArg(bbArg);
     // Check the iteration argument may be used to pass in the producer output.
     if (!iterArg || hasOtherUses(bbArg, sliceOp))

@@ -109,7 +109,7 @@ private:
 /// Return true if all uses of `padOp` are an input tensor of some
 /// LinalgOp.
 static bool isOnlyUsedAsInputOfLinalgOp(tensor::PadOp padOp) {
-  for (OpOperand &use : padOp.result().getUses()) {
+  for (OpOperand &use : padOp.getResult().getUses()) {
     auto linalgUser = dyn_cast<linalg::LinalgOp>(use.getOwner());
     if (!linalgUser || !linalgUser.isInputTensor(&use)) {
       LLVM_DEBUG(DBGS() << "Found a use of " << *(padOp)

@@ -198,12 +198,12 @@ HoistingAnalysis::HoistingAnalysis(tensor::PadOp padOp, int numLoops) {
   //   %slice = tensor.extract_slice %source [%i, %j]
   //   %padded_slice = tensor.pad %slice
   // ```
-  auto sliceOp = padOp.source().getDefiningOp<tensor::ExtractSliceOp>();
+  auto sliceOp = padOp.getSource().getDefiningOp<tensor::ExtractSliceOp>();
   if (!sliceOp) {
     LLVM_DEBUG(DBGS() << "Cannot find the extract slice op -> skip\n");
     return;
   }
-  if (!outermostEnclosingForOp.isDefinedOutsideOfLoop(sliceOp.source())) {
+  if (!outermostEnclosingForOp.isDefinedOutsideOfLoop(sliceOp.getSource())) {
     LLVM_DEBUG(DBGS() << "Source not defined outside of loops -> skip\n");
     return;
   }

@@ -453,7 +453,7 @@ FailureOr<Value> mlir::linalg::hoistPaddingOnTensors(
     // Specifically sit out in the extract_slice(packedTensor) case: this is the
     // piece we seek to replace.
     if (auto sliceOp = dyn_cast<tensor::ExtractSliceOp>(op))
-      if (bvm.lookupOrDefault(sliceOp.source()) == packedTensor)
+      if (bvm.lookupOrDefault(sliceOp.getSource()) == packedTensor)
         continue;
     // Clone all operations except it is a loop.
     auto forOp = dyn_cast<scf::ForOp>(op);

@@ -499,7 +499,7 @@ FailureOr<Value> mlir::linalg::hoistPaddingOnTensors(
                    b.getIndexAttr(1));

   // Stack step 2. create GenericOp if `transposeVector` is non-empty.
-  Value paddedTensor = bvm.lookup(opToHoist.result());
+  Value paddedTensor = bvm.lookup(opToHoist.getResult());
   if (!transposeVector.empty()) {
     Value outputTensor = b.create<tensor::ExtractSliceOp>(
         loc, *transposedTensorType, packedTensor, offsets, sizes, strides);

@@ -553,6 +553,6 @@ FailureOr<Value> mlir::linalg::hoistPaddingOnTensors(

   // Make the newly cloned `opToHoist` available to the caller.
   hoistedOp =
-      cast<tensor::PadOp>(bvm.lookup(opToHoist.result()).getDefiningOp());
+      cast<tensor::PadOp>(bvm.lookup(opToHoist.getResult()).getDefiningOp());
   return newResult;
 }

@@ -76,11 +76,11 @@ static bool isEqualOffsetSizeOrStride(OpFoldResult op1, OpFoldResult op2) {
 /// Return true is all offsets, sizes and strides are equal.
 static bool sameOffsetsSizesAndStrides(tensor::ExtractSliceOp s,
                                        tensor::InsertSliceOp si) {
-  if (s.static_offsets().size() != si.static_offsets().size())
+  if (s.getStaticOffsets().size() != si.getStaticOffsets().size())
     return false;
-  if (s.static_sizes().size() != si.static_sizes().size())
+  if (s.getStaticSizes().size() != si.getStaticSizes().size())
     return false;
-  if (s.static_strides().size() != si.static_strides().size())
+  if (s.getStaticStrides().size() != si.getStaticStrides().size())
     return false;
   for (auto it : llvm::zip(s.getMixedOffsets(), si.getMixedOffsets()))
     if (!isEqualOffsetSizeOrStride(std::get<0>(it), std::get<1>(it)))

@@ -118,7 +118,7 @@ static HoistableRead findMatchingTransferRead(HoistableWrite write,
     if (write.insertSliceOp) {
       sliceOp = dyn_cast<tensor::ExtractSliceOp>(user);
       if (!sliceOp || sliceOp.getResult().getType() !=
-                          write.insertSliceOp.source().getType())
+                          write.insertSliceOp.getSource().getType())
         continue;

       LLVM_DEBUG(DBGS() << "check whether sameOffsetsSizesAndStrides: "

@@ -235,12 +235,12 @@ getLoopInvariantTransferWriteOpDefining(scf::ForOp forOp,
   if (auto insertSliceOp = v.getDefiningOp<tensor::InsertSliceOp>()) {
     // Inserted slice must come from vector.transfer_write.
     auto write =
-        insertSliceOp.source().getDefiningOp<vector::TransferWriteOp>();
+        insertSliceOp.getSource().getDefiningOp<vector::TransferWriteOp>();
     if (!write)
       return HoistableWrite();

     // Tensor inserted into must be a BBArg at position matching yieldOperand's.
-    auto bbArg = insertSliceOp.dest().dyn_cast<BlockArgument>();
+    auto bbArg = insertSliceOp.getDest().dyn_cast<BlockArgument>();
     if (!bbArg || bbArg.getOwner()->getParentOp() != forOp ||
         bbArg.getArgNumber() != /*num iv=*/1 + yieldOperand.getOperandNumber())
       return HoistableWrite();

@@ -285,7 +285,7 @@ static void hoistReadWrite(HoistableRead read, HoistableWrite write,

   // Update the source tensor.
   if (read.extractSliceOp)
-    read.extractSliceOp.sourceMutable().assign(
+    read.extractSliceOp.getSourceMutable().assign(
         forOp.getInitArgs()[initArgNumber]);
   else
     read.transferReadOp.getSourceMutable().assign(

@@ -299,7 +299,7 @@ static void hoistReadWrite(HoistableRead read, HoistableWrite write,
   // Update the yield.
   auto yieldOp = cast<scf::YieldOp>(forOp.getRegion().front().getTerminator());
   if (write.insertSliceOp)
-    yieldOp->setOperand(initArgNumber, write.insertSliceOp.dest());
+    yieldOp->setOperand(initArgNumber, write.insertSliceOp.getDest());
   else
     yieldOp->setOperand(initArgNumber, write.transferWriteOp.getSource());

@@ -321,8 +321,9 @@ static void hoistReadWrite(HoistableRead read, HoistableWrite write,
     newForOp.getResult(initArgNumber)
         .replaceAllUsesWith(write.insertSliceOp.getResult());
     write.transferWriteOp.getSourceMutable().assign(
-        read.extractSliceOp.result());
-    write.insertSliceOp.destMutable().assign(read.extractSliceOp.source());
+        read.extractSliceOp.getResult());
+    write.insertSliceOp.getDestMutable().assign(
+        read.extractSliceOp.getSource());
   } else {
     newForOp.getResult(initArgNumber)
         .replaceAllUsesWith(write.transferWriteOp.getResult());

@@ -50,7 +50,7 @@ struct FusePadOp : OpRewritePattern<tensor::PadOp> {

     // This pattern could work for any Linalg op. For now restrict it to generic
     // ops.
-    Value source = padOp.source();
+    Value source = padOp.getSource();
     auto linalgOp = source.getDefiningOp<linalg::GenericOp>();
     if (!linalgOp) {
       return rewriter.notifyMatchFailure(

@@ -89,9 +89,9 @@ static Value insertSliceIntoTensor(RewriterBase &b, Location loc,
                                    tensor::ExtractSliceOp sliceOp, Value source,
                                    Value dest) {
   return b.create<tensor::InsertSliceOp>(
-      loc, sliceOp.source().getType(), source, dest, sliceOp.offsets(),
-      sliceOp.sizes(), sliceOp.strides(), sliceOp.static_offsets(),
-      sliceOp.static_sizes(), sliceOp.static_strides());
+      loc, sliceOp.getSource().getType(), source, dest, sliceOp.getOffsets(),
+      sliceOp.getSizes(), sliceOp.getStrides(), sliceOp.getStaticOffsets(),
+      sliceOp.getStaticSizes(), sliceOp.getStaticStrides());
 }

 template <typename LoopTy>

@@ -202,7 +202,7 @@ tileLinalgOpImpl(RewriterBase &b, LinalgOp op, ValueRange tileSizes,
     if (auto sliceOp = outputTensor.getDefiningOp<tensor::ExtractSliceOp>()) {
       tensorResults.push_back(insertSliceIntoTensor(rewriter, loc, sliceOp,
                                                     res->getResult(resultIdx),
-                                                    sliceOp.source()));
+                                                    sliceOp.getSource()));
     } else {
       tensorResults.push_back(res->getResult(resultIdx));
     }

@@ -819,8 +819,8 @@ LogicalResult
 PadOpTransformationPattern::matchAndRewrite(tensor::PadOp padOp,
                                             PatternRewriter &rewriter) const {

-  auto inputShapedType = padOp.source().getType().cast<ShapedType>();
-  auto resultShapedType = padOp.result().getType().cast<ShapedType>();
+  auto inputShapedType = padOp.getSource().getType().cast<ShapedType>();
+  auto resultShapedType = padOp.getResult().getType().cast<ShapedType>();

   // Bail on non-static shapes.
   if (!inputShapedType.hasStaticShape())

@@ -831,9 +831,9 @@ PadOpTransformationPattern::matchAndRewrite(tensor::PadOp padOp,
   // Only support padding with a constant for now, i.e. either:
   //   1. A BBarg from a different block.
   //   2. A value defined outside of the current block.
-  Block &block = padOp.region().front();
+  Block &block = padOp.getRegion().front();
   auto yieldOp = cast<tensor::YieldOp>(block.getTerminator());
-  Value padValue = yieldOp.value();
+  Value padValue = yieldOp.getValue();
   Operation *definingOp = padValue.getDefiningOp();
   if (definingOp && definingOp->getBlock() == &block)
     return failure();

@@ -858,7 +858,7 @@ PadOpTransformationPattern::matchAndRewrite(tensor::PadOp padOp,
   SmallVector<AffineExpr, 4> outputExprs;
   for (unsigned i = 0; i < resultShapedType.getRank(); ++i) {
     outputExprs.push_back(getAffineDimExpr(i, rewriter.getContext()) +
-                          padOp.static_low()[i].cast<IntegerAttr>().getInt());
+                          padOp.getStaticLow()[i].cast<IntegerAttr>().getInt());
   }

   SmallVector<AffineMap, 2> transferMaps = {

@@ -867,7 +867,7 @@ PadOpTransformationPattern::matchAndRewrite(tensor::PadOp padOp,
       /*symbolCount=*/0, outputExprs, rewriter.getContext())};

   rewriter.replaceOpWithNewOp<linalg::GenericOp>(
-      padOp, resultShapedType, padOp.source(), tmpTensor, transferMaps,
+      padOp, resultShapedType, padOp.getSource(), tmpTensor, transferMaps,
       getNParallelLoopsAttrs(resultShapedType.getRank()),
       [&](OpBuilder &nestedBuilder, Location nestedLoc, ValueRange args) {
         nestedBuilder.create<linalg::YieldOp>(nestedLoc, args[0]);

@@ -890,7 +890,7 @@ Value GeneralizePadOpPattern::createFillOrGenerateOp(
       padOp.getLoc(), padOp.getResultType(), dynSizes);
   // Copy region to new op.
   BlockAndValueMapping bvm;
-  padOp.region().cloneInto(&generateOp.getRegion(), bvm);
+  padOp.getRegion().cloneInto(&generateOp.getRegion(), bvm);
   return generateOp;
 }

@@ -914,8 +914,8 @@ GeneralizePadOpPattern::matchAndRewrite(tensor::PadOp padOp,
   SmallVector<int64_t> staticSizes;
   for (unsigned dim = 0; dim < resultType.getRank(); ++dim) {
     if (resultType.isDynamicDim(dim)) {
-      auto srcSize = rewriter.createOrFold<tensor::DimOp>(padOp.getLoc(),
-                                                          padOp.source(), dim);
+      auto srcSize = rewriter.createOrFold<tensor::DimOp>(
+          padOp.getLoc(), padOp.getSource(), dim);
       // Add low and high padding value.
       auto plusLow = rewriter.createOrFold<arith::AddIOp>(
           padOp.getLoc(), srcSize, getIdxValue(padOp.getMixedLowPad()[dim]));

@@ -943,7 +943,7 @@ GeneralizePadOpPattern::matchAndRewrite(tensor::PadOp padOp,
   for (unsigned dim = 0; dim < sourceType.getRank(); ++dim) {
     if (sourceType.isDynamicDim(dim)) {
       srcSizes.push_back(rewriter.createOrFold<tensor::DimOp>(
-          padOp.getLoc(), padOp.source(), dim));
+          padOp.getLoc(), padOp.getSource(), dim));
     } else {
       srcSizes.push_back(rewriter.getIndexAttr(sourceType.getDimSize(dim)));
     }

@ -952,7 +952,8 @@ GeneralizePadOpPattern::matchAndRewrite(tensor::PadOp padOp,
|
|||
SmallVector<OpFoldResult> strides(sourceType.getRank(),
|
||||
rewriter.getIndexAttr(1));
|
||||
rewriter.replaceOpWithNewOp<tensor::InsertSliceOp>(
|
||||
padOp, padOp.source(), fill, padOp.getMixedLowPad(), srcSizes, strides);
|
||||
padOp, padOp.getSource(), fill, padOp.getMixedLowPad(), srcSizes,
|
||||
strides);
|
||||
|
||||
return success();
|
||||
}
|
||||
|
@ -962,7 +963,7 @@ LogicalResult ExtractSliceOfPadTensorSwapPattern::matchAndRewrite(
|
|||
if (!sliceOp.hasUnitStride())
|
||||
return failure();
|
||||
|
||||
auto padOp = sliceOp.source().getDefiningOp<tensor::PadOp>();
|
||||
auto padOp = sliceOp.getSource().getDefiningOp<tensor::PadOp>();
|
||||
if (!padOp)
|
||||
return failure();
|
||||
|
||||
|
|
|
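For the size computation in GeneralizePadOpPattern above, the per-dimension intuition is simply `low + src + high`. A tiny worked example with assumed values (the numbers are illustrative, not taken from the diff):

#include <cassert>
#include <cstdint>

int main() {
  // A hypothetical source dimension of extent 4, padded low by 2, high by 1.
  int64_t low = 2, src = 4, high = 1;
  int64_t padded = low + src + high; // extent the fill tensor must have
  assert(padded == 7);
  // The pattern then writes the source back at offset `low` (here: 2)
  // via tensor::InsertSliceOp, leaving the fill value in the pad area.
}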
@@ -743,7 +743,7 @@ struct GenericPadOpVectorizationPattern : public GeneralizePadOpPattern {
         vecType.getRank(),
         rewriter.create<arith::ConstantIndexOp>(padOp.getLoc(), 0));
     auto read = rewriter.create<vector::TransferReadOp>(
-        padOp.getLoc(), vecType, padOp.source(), readIndices, padValue,
+        padOp.getLoc(), vecType, padOp.getSource(), readIndices, padValue,
         ArrayRef<bool>{readInBounds});
 
     // If `dest` is a FillOp and the TransferWriteOp would overwrite the

@@ -825,7 +825,7 @@ struct PadOpVectorizationWithTransferReadPattern
     SmallVector<bool> inBounds(xferOp.getVectorType().getRank(), false);
       xferOp->setAttr(xferOp.getInBoundsAttrName(),
                       rewriter.getBoolArrayAttr(inBounds));
-      xferOp.getSourceMutable().assign(padOp.source());
+      xferOp.getSourceMutable().assign(padOp.getSource());
       xferOp.getPaddingMutable().assign(padValue);
     });
 

@@ -893,7 +893,7 @@ struct PadOpVectorizationWithTransferWritePattern
     if (!trimPadding.hasZeroOffset())
       return failure();
     // trimPadding must remove the amount of padding that was added earlier.
-    if (!hasSameTensorSize(padOp.source(), trimPadding))
+    if (!hasSameTensorSize(padOp.getSource(), trimPadding))
       return failure();
 
     // Insert the new TransferWriteOp at position of the old TransferWriteOp.

@@ -901,9 +901,9 @@ struct PadOpVectorizationWithTransferWritePattern
 
     SmallVector<bool> inBounds(xferOp.getVectorType().getRank(), false);
     auto newXferOp = rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
-        xferOp, padOp.source().getType(), xferOp.getVector(), padOp.source(),
-        xferOp.getIndices(), xferOp.getPermutationMapAttr(), xferOp.getMask(),
-        rewriter.getBoolArrayAttr(inBounds));
+        xferOp, padOp.getSource().getType(), xferOp.getVector(),
+        padOp.getSource(), xferOp.getIndices(), xferOp.getPermutationMapAttr(),
+        xferOp.getMask(), rewriter.getBoolArrayAttr(inBounds));
     rewriter.replaceOp(trimPadding, newXferOp->getResult(0));
 
     return success();

@@ -924,7 +924,7 @@ struct PadOpVectorizationWithTransferWritePattern
     // If the input to tensor::PadOp is a CastOp, try with both CastOp
     // result and CastOp operand.
     if (auto castOp = beforePadding.getDefiningOp<tensor::CastOp>())
-      if (hasSameTensorSize(castOp.source(), afterTrimming))
+      if (hasSameTensorSize(castOp.getSource(), afterTrimming))
        return true;
 
    auto t1 = beforePadding.getType().dyn_cast<RankedTensorType>();

@@ -1037,10 +1037,10 @@ struct PadOpVectorizationWithInsertSlicePattern
     if (!padValue)
       return failure();
     // Dynamic shapes not supported.
-    if (!padOp.result().getType().cast<ShapedType>().hasStaticShape())
+    if (!padOp.getResult().getType().cast<ShapedType>().hasStaticShape())
       return failure();
     // Pad result not used as destination.
-    if (insertOp.dest() == padOp.result())
+    if (insertOp.getDest() == padOp.getResult())
       return failure();
 
     auto vecType = VectorType::get(padOp.getType().getShape(),

@@ -1067,7 +1067,7 @@ struct PadOpVectorizationWithInsertSlicePattern
     SmallVector<Value> readIndices(
         vecRank, rewriter.create<arith::ConstantIndexOp>(padOp.getLoc(), 0));
     auto read = rewriter.create<vector::TransferReadOp>(
-        padOp.getLoc(), vecType, padOp.source(), readIndices, padValue);
+        padOp.getLoc(), vecType, padOp.getSource(), readIndices, padValue);
 
     // Generate TransferWriteOp: Write to InsertSliceOp's dest tensor at
     // specified offsets. Write is fully in-bounds because an InsertSliceOp's

@@ -1076,7 +1076,7 @@ struct PadOpVectorizationWithInsertSlicePattern
         ofrToIndexValues(rewriter, padOp.getLoc(), insertOp.getMixedOffsets());
     SmallVector<bool> inBounds(vecRank, true);
     rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
-        insertOp, read, insertOp.dest(), writeIndices,
+        insertOp, read, insertOp.getDest(), writeIndices,
         ArrayRef<bool>{inBounds});
 
     return success();
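The `hasSameTensorSize(castOp.getSource(), afterTrimming)` retry above peels one tensor.cast off the padded value before comparing sizes again. A self-contained mock of that control flow, with an invented `MockValue` type standing in for MLIR values:

#include <cassert>
#include <vector>

struct MockValue {
  std::vector<long> shape;
  const MockValue *castOperand = nullptr; // non-null if defined by a cast
};

static bool sameShape(const MockValue &a, const MockValue &b) {
  return a.shape == b.shape;
}

// Mirror of the retry: compare directly, then once more through the cast.
static bool hasSameTensorSize(const MockValue &before,
                              const MockValue &after) {
  if (sameShape(before, after))
    return true;
  return before.castOperand && sameShape(*before.castOperand, after);
}

int main() {
  MockValue operand{{4, 4}};
  MockValue castResult{{-1, 4}, &operand}; // cast erased one static extent
  MockValue trimmed{{4, 4}};
  assert(hasSameTensorSize(castResult, trimmed)); // succeeds via the operand
}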
@@ -369,7 +369,7 @@ tensor::ExtractSliceOp makeComposedExtractSliceOp(
     foldedOffsets[en.index()] =
         makeComposedAffineApply(b, loc, dim1 + dim2, offsetValues).getResult();
   }
-  return b.create<tensor::ExtractSliceOp>(loc, producerOp.source(),
+  return b.create<tensor::ExtractSliceOp>(loc, producerOp.getSource(),
                                           foldedOffsets, sizes, strides);
 }
 

@@ -381,7 +381,7 @@ Value makeComposedPadHighOp(OpBuilder &b, Location loc, RankedTensorType type,
     return tensor::createPadHighOp(type, source, pad, nofold, loc, b);
 
   // Search the `source` use-def chain for padded LinalgOps.
-  Value current = sliceOp.source();
+  Value current = sliceOp.getSource();
   while (current) {
     auto linalgOp = current.getDefiningOp<LinalgOp>();
     if (!linalgOp)

@@ -397,7 +397,7 @@ Value makeComposedPadHighOp(OpBuilder &b, Location loc, RankedTensorType type,
     return tensor::createPadHighOp(type, source, pad, nofold, loc, b);
 
   // Exit if the padded result type does not match.
-  if (sliceOp.source().getType() != type)
+  if (sliceOp.getSource().getType() != type)
     return tensor::createPadHighOp(type, source, pad, nofold, loc, b);
 
   // Exit if the LinalgOps are not high padded.

@@ -408,7 +408,7 @@ Value makeComposedPadHighOp(OpBuilder &b, Location loc, RankedTensorType type,
 
   // Exit if `padOpSliceOp`, which defines the slice used by
   // `padOp`, is rank-reducing.
-  auto padOpSliceOp = padOp.source().getDefiningOp<tensor::ExtractSliceOp>();
+  auto padOpSliceOp = padOp.getSource().getDefiningOp<tensor::ExtractSliceOp>();
   if (!padOpSliceOp ||
       sliceOp.getMixedSizes().size() != padOpSliceOp.getMixedSizes().size())
     return tensor::createPadHighOp(type, source, pad, nofold, loc, b);

@@ -430,7 +430,7 @@ Value makeComposedPadHighOp(OpBuilder &b, Location loc, RankedTensorType type,
     return tensor::createPadHighOp(type, source, pad, nofold, loc, b);
 
   // Return the padded result if the padding values and sizes match.
-  return sliceOp.source();
+  return sliceOp.getSource();
 }
 
 GenericOp makeTransposeOp(OpBuilder &b, Location loc, Value inputTensor,
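makeComposedPadHighOp above walks the use-def chain from the slice source, bailing out to `createPadHighOp` whenever any check fails. A minimal mock of that walk-until-match loop (the node type is invented; the real code inspects LinalgOps and padding attributes):

#include <cassert>

struct MockOp {
  bool interesting = false;
  const MockOp *definingOp = nullptr; // producer of this op's operand
};

// Follow defining ops until a match or the chain ends.
static const MockOp *findProducer(const MockOp *current) {
  while (current) {
    if (current->interesting)
      return current;
    current = current->definingOp;
  }
  return nullptr; // caller falls back, like createPadHighOp above
}

int main() {
  MockOp producer{true, nullptr};
  MockOp slice{false, &producer};
  assert(findProducer(&slice) == &producer);
  MockOp lone{false, nullptr};
  assert(findProducer(&lone) == nullptr);
}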
@@ -93,7 +93,7 @@ resolveSourceIndices(Location loc, PatternRewriter &rewriter,
 /// Helpers to access the memref operand for each op.
 template <typename LoadOrStoreOpTy>
 static Value getMemRefOperand(LoadOrStoreOpTy op) {
-  return op.memref();
+  return op.getMemref();
 }
 
 static Value getMemRefOperand(vector::TransferReadOp op) {

@@ -186,7 +186,7 @@ template <typename StoreOpTy>
 void StoreOpOfSubViewFolder<StoreOpTy>::replaceOp(
     StoreOpTy storeOp, memref::SubViewOp subViewOp,
     ArrayRef<Value> sourceIndices, PatternRewriter &rewriter) const {
-  rewriter.replaceOpWithNewOp<StoreOpTy>(storeOp, storeOp.value(),
+  rewriter.replaceOpWithNewOp<StoreOpTy>(storeOp, storeOp.getValue(),
                                          subViewOp.source(), sourceIndices);
 }
 
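The `getMemRefOperand` helpers above use a classic overload-dispatch idiom: a generic template handles any op whose operand accessor is `getMemref()`, while dedicated non-template overloads pick the right accessor for ops that name the operand differently (vector transfer ops spell it `source`). A self-contained mock with invented op types:

#include <iostream>
#include <string>

// Invented mock ops; the real helpers dispatch on memref::LoadOp,
// vector::TransferReadOp, etc.
struct MockLoadOp {
  std::string getMemref() const { return "%alloc"; }
};
struct MockTransferReadOp {
  std::string getSource() const { return "%subview"; }
};

// Generic path: any op whose operand accessor is getMemref().
template <typename LoadOrStoreOpTy>
std::string getMemRefOperand(LoadOrStoreOpTy op) {
  return op.getMemref();
}
// The non-template overload wins for ops that spell the operand `source`.
std::string getMemRefOperand(MockTransferReadOp op) {
  return op.getSource();
}

int main() {
  std::cout << getMemRefOperand(MockLoadOp{}) << "\n";         // %alloc
  std::cout << getMemRefOperand(MockTransferReadOp{}) << "\n"; // %subview
}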
@@ -30,7 +30,7 @@ struct DimOfShapedTypeOpInterface : public OpRewritePattern<OpTy> {
 
   LogicalResult matchAndRewrite(OpTy dimOp,
                                 PatternRewriter &rewriter) const override {
-    OpResult dimValue = dimOp.source().template dyn_cast<OpResult>();
+    OpResult dimValue = dimOp.getSource().template dyn_cast<OpResult>();
     if (!dimValue)
       return failure();
     auto shapedTypeOp =

@@ -70,7 +70,7 @@ struct DimOfReifyRankedShapedTypeOpInterface : public OpRewritePattern<OpTy> {
 
   LogicalResult matchAndRewrite(OpTy dimOp,
                                 PatternRewriter &rewriter) const override {
-    OpResult dimValue = dimOp.source().template dyn_cast<OpResult>();
+    OpResult dimValue = dimOp.getSource().template dyn_cast<OpResult>();
     if (!dimValue)
       return failure();
     auto rankedShapeTypeOp =
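A side note on the `.template dyn_cast<OpResult>()` spelling that survives the rename in both patterns above: the keyword is required because `dimOp`'s type is a template parameter, which makes the member call a dependent name. A compilable illustration with invented mock types:

#include <iostream>

struct MockValue {
  template <typename T>
  bool isa() const { return true; } // stand-in for mlir::Value::isa<...>()
};

struct MockDimOp {
  MockValue getSource() const { return {}; }
};

template <typename OpTy>
bool sourceIsOpResult(OpTy dimOp) {
  // `getSource()` returns a type dependent on OpTy, so `template` is
  // needed for the parser to read `isa<int>` as a member template call
  // rather than a less-than comparison.
  return dimOp.getSource().template isa<int>();
}

int main() { std::cout << sourceIsOpResult(MockDimOp{}) << "\n"; }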
@@ -37,10 +37,10 @@ void QuantizationDialect::initialize() {
 OpFoldResult StorageCastOp::fold(ArrayRef<Attribute> operands) {
   // Matches x -> [scast -> scast] -> y, replacing the second scast with the
   // value of x if the casts invert each other.
-  auto srcScastOp = arg().getDefiningOp<StorageCastOp>();
-  if (!srcScastOp || srcScastOp.arg().getType() != getType())
+  auto srcScastOp = getArg().getDefiningOp<StorageCastOp>();
+  if (!srcScastOp || srcScastOp.getArg().getType() != getType())
     return OpFoldResult();
-  return srcScastOp.arg();
+  return srcScastOp.getArg();
 }
 
 /// The quantization specification should match the expressed type.

@@ -67,13 +67,13 @@ static bool isValidQuantizationSpec(Attribute quantSpec, Type expressed) {
 
 LogicalResult QuantizeRegionOp::verify() {
   // There are specifications for both inputs and outputs.
-  if (getNumOperands() != input_specs().size() ||
-      getNumResults() != output_specs().size())
+  if (getNumOperands() != getInputSpecs().size() ||
+      getNumResults() != getOutputSpecs().size())
     return emitOpError(
         "has unmatched operands/results number and spec attributes number");
 
   // Verify that quantization specifications are valid.
-  for (auto input : llvm::zip(getOperandTypes(), input_specs())) {
+  for (auto input : llvm::zip(getOperandTypes(), getInputSpecs())) {
     Type inputType = std::get<0>(input);
     Attribute inputSpec = std::get<1>(input);
     if (!isValidQuantizationSpec(inputSpec, inputType)) {

@@ -82,7 +82,7 @@ LogicalResult QuantizeRegionOp::verify() {
     }
   }
 
-  for (auto result : llvm::zip(getResultTypes(), output_specs())) {
+  for (auto result : llvm::zip(getResultTypes(), getOutputSpecs())) {
     Type outputType = std::get<0>(result);
     Attribute outputSpec = std::get<1>(result);
     if (!isValidQuantizationSpec(outputSpec, outputType)) {

@@ -94,13 +94,13 @@ LogicalResult QuantizeRegionOp::verify() {
 }
 
 LogicalResult StatisticsOp::verify() {
-  auto tensorArg = arg().getType().dyn_cast<TensorType>();
+  auto tensorArg = getArg().getType().dyn_cast<TensorType>();
   if (!tensorArg)
     return emitOpError("arg needs to be tensor type.");
 
   // Verify layerStats attribute.
   {
-    auto layerStatsType = layerStats().getType();
+    auto layerStatsType = getLayerStats().getType();
     if (!layerStatsType.getElementType().isa<FloatType>()) {
       return emitOpError("layerStats must have a floating point element type");
     }

@@ -109,16 +109,16 @@ LogicalResult StatisticsOp::verify() {
     }
   }
   // Verify axisStats (optional) attribute.
-  if (axisStats()) {
-    if (!axis())
+  if (getAxisStats()) {
+    if (!getAxis())
       return emitOpError("axis must be specified for axisStats");
 
     auto shape = tensorArg.getShape();
     auto argSliceSize =
-        std::accumulate(std::next(shape.begin(), *axis()), shape.end(), 1,
+        std::accumulate(std::next(shape.begin(), *getAxis()), shape.end(), 1,
                         std::multiplies<int64_t>());
 
-    auto axisStatsType = axisStats()->getType();
+    auto axisStatsType = getAxisStats()->getType();
     if (!axisStatsType.getElementType().isa<FloatType>()) {
       return emitOpError("axisStats must have a floating point element type");
     }
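The `argSliceSize` computation in StatisticsOp::verify above multiplies the tensor extents from the stats axis onward. A standalone worked example with an assumed shape (the numbers are invented for illustration):

#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <numeric>
#include <vector>

int main() {
  std::vector<int64_t> shape = {2, 3, 4, 5}; // hypothetical tensor shape
  int64_t axis = 1;                          // hypothetical stats axis
  // Same reduction as the verifier: product of extents from `axis` on.
  int64_t argSliceSize =
      std::accumulate(std::next(shape.begin(), axis), shape.end(),
                      int64_t{1}, std::multiplies<int64_t>());
  assert(argSliceSize == 3 * 4 * 5); // 60 elements per axisStats slice
}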
@@ -42,7 +42,7 @@ QuantizedConstRewrite::matchAndRewrite(QuantizeCastOp qbarrier,
   Attribute value;
 
   // Is the operand a constant?
-  if (!matchPattern(qbarrier.arg(), m_Constant(&value))) {
+  if (!matchPattern(qbarrier.getArg(), m_Constant(&value))) {
     return failure();
   }
 

@@ -63,7 +63,7 @@ QuantizedConstRewrite::matchAndRewrite(QuantizeCastOp qbarrier,
   // type? This will not be true if the qbarrier is superfluous (converts
   // from and to a quantized type).
   if (!quantizedElementType.isCompatibleExpressedType(
-          qbarrier.arg().getType())) {
+          qbarrier.getArg().getType())) {
     return failure();
   }
 

@@ -82,7 +82,7 @@ QuantizedConstRewrite::matchAndRewrite(QuantizeCastOp qbarrier,
   // When creating the new const op, use a fused location that combines the
   // original const and the qbarrier that led to the quantization.
   auto fusedLoc = rewriter.getFusedLoc(
-      {qbarrier.arg().getDefiningOp()->getLoc(), qbarrier.getLoc()});
+      {qbarrier.getArg().getDefiningOp()->getLoc(), qbarrier.getLoc()});
   auto newConstOp = rewriter.create<arith::ConstantOp>(
       fusedLoc, newConstValueType, newConstValue);
   rewriter.replaceOpWithNewOp<StorageCastOp>(qbarrier, qbarrier.getType(),
@@ -69,7 +69,7 @@ private:
     // TODO: Map to a qbarrier with an attribute like [Forced] to signal that
     // this is a forced/hard-coded constraint.
     auto qbarrier = rewriter.create<QuantizeCastOp>(op.getLoc(), quantizedType,
-                                                    op.inputs());
+                                                    op.getInputs());
     rewriter.replaceOpWithNewOp<DequantizeCastOp>(op, converter.inputType,
                                                   qbarrier.getResult());
 

@@ -88,9 +88,9 @@ public:
   QuantizedType convertFakeQuantAttrsToType(ConstFakeQuant fqOp,
                                             Type expressedType) const {
     return fakeQuantAttrsToType(
-        fqOp.getLoc(), fqOp.num_bits(), fqOp.min().convertToFloat(),
-        fqOp.max().convertToFloat(), fqOp.narrow_range(), expressedType,
-        fqOp.is_signed());
+        fqOp.getLoc(), fqOp.getNumBits(), fqOp.getMin().convertToFloat(),
+        fqOp.getMax().convertToFloat(), fqOp.getNarrowRange(), expressedType,
+        fqOp.getIsSigned());
   }
 };
 

@@ -107,16 +107,16 @@ public:
   QuantizedType convertFakeQuantAttrsToType(ConstFakeQuantPerAxis fqOp,
                                             Type expressedType) const {
     SmallVector<double, 4> min, max;
-    min.reserve(fqOp.min().size());
-    max.reserve(fqOp.max().size());
-    for (auto m : fqOp.min())
+    min.reserve(fqOp.getMin().size());
+    max.reserve(fqOp.getMax().size());
+    for (auto m : fqOp.getMin())
       min.push_back(m.cast<FloatAttr>().getValueAsDouble());
-    for (auto m : fqOp.max())
+    for (auto m : fqOp.getMax())
       max.push_back(m.cast<FloatAttr>().getValueAsDouble());
 
-    return fakeQuantAttrsToType(fqOp.getLoc(), fqOp.num_bits(), fqOp.axis(),
-                                min, max, fqOp.narrow_range(), expressedType,
-                                fqOp.is_signed());
+    return fakeQuantAttrsToType(fqOp.getLoc(), fqOp.getNumBits(),
+                                fqOp.getAxis(), min, max, fqOp.getNarrowRange(),
+                                expressedType, fqOp.getIsSigned());
   }
 };
 
@@ -46,7 +46,7 @@ static bool isShapePreserving(ForOp forOp, int64_t arg) {
     value =
         llvm::TypeSwitch<Operation *, Value>(opResult.getOwner())
             .template Case<InsertSliceOp>(
-                [&](InsertSliceOp op) { return op.dest(); })
+                [&](InsertSliceOp op) { return op.getDest(); })
             .template Case<ForOp>([&](ForOp forOp) {
               return isShapePreserving(forOp, opResult.getResultNumber())
                          ? forOp.getIterOperands()[opResult.getResultNumber()]

@@ -86,7 +86,7 @@ struct DimOfIterArgFolder : public OpRewritePattern<OpTy> {
 
   LogicalResult matchAndRewrite(OpTy dimOp,
                                 PatternRewriter &rewriter) const override {
-    auto blockArg = dimOp.source().template dyn_cast<BlockArgument>();
+    auto blockArg = dimOp.getSource().template dyn_cast<BlockArgument>();
     if (!blockArg)
       return failure();
     auto forOp = dyn_cast<ForOp>(blockArg.getParentBlock()->getParentOp());

@@ -97,7 +97,7 @@ struct DimOfIterArgFolder : public OpRewritePattern<OpTy> {
 
     Value initArg = forOp.getOpOperandForRegionIterArg(blockArg).get();
     rewriter.updateRootInPlace(
-        dimOp, [&]() { dimOp.sourceMutable().assign(initArg); });
+        dimOp, [&]() { dimOp.getSourceMutable().assign(initArg); });
 
     return success();
   };

@@ -131,15 +131,15 @@ struct DimOfLoopResultFolder : public OpRewritePattern<OpTy> {
 
   LogicalResult matchAndRewrite(OpTy dimOp,
                                 PatternRewriter &rewriter) const override {
-    auto forOp = dimOp.source().template getDefiningOp<scf::ForOp>();
+    auto forOp = dimOp.getSource().template getDefiningOp<scf::ForOp>();
     if (!forOp)
       return failure();
-    auto opResult = dimOp.source().template cast<OpResult>();
+    auto opResult = dimOp.getSource().template cast<OpResult>();
     unsigned resultNumber = opResult.getResultNumber();
     if (!isShapePreserving(forOp, resultNumber))
      return failure();
     rewriter.updateRootInPlace(dimOp, [&]() {
-      dimOp.sourceMutable().assign(forOp.getIterOperands()[resultNumber]);
+      dimOp.getSourceMutable().assign(forOp.getIterOperands()[resultNumber]);
     });
     return success();
   }
@@ -42,7 +42,7 @@ static void specializeParallelLoopForUnrolling(ParallelOp op) {
   if (!minOp)
     return;
   int64_t minConstant = std::numeric_limits<int64_t>::max();
-  for (AffineExpr expr : minOp.map().getResults()) {
+  for (AffineExpr expr : minOp.getMap().getResults()) {
     if (auto constantIndex = expr.dyn_cast<AffineConstantExpr>())
       minConstant = std::min(minConstant, constantIndex.getValue());
   }

@@ -78,7 +78,7 @@ static void specializeForLoopForUnrolling(ForOp op) {
   if (!minOp)
     return;
   int64_t minConstant = std::numeric_limits<int64_t>::max();
-  for (AffineExpr expr : minOp.map().getResults()) {
+  for (AffineExpr expr : minOp.getMap().getResults()) {
     if (auto constantIndex = expr.dyn_cast<AffineConstantExpr>())
       minConstant = std::min(minConstant, constantIndex.getValue());
   }
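Both specialization helpers above scan an affine map's results for the smallest constant bound. A standalone mock of that scan, where `std::optional` plays the role of `dyn_cast<AffineConstantExpr>` (the values are invented):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>
#include <optional>
#include <vector>

int main() {
  // Hypothetical affine.min results: two constants and one non-constant.
  std::vector<std::optional<int64_t>> results = {256, std::nullopt, 64};
  int64_t minConstant = std::numeric_limits<int64_t>::max();
  for (const auto &expr : results)
    if (expr) // plays the role of expr.dyn_cast<AffineConstantExpr>()
      minConstant = std::min(minConstant, *expr);
  std::cout << minConstant << "\n"; // 64: the tightest constant bound
}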
@@ -333,7 +333,7 @@ scf::TileConsumerAndFuseProducersUsingSCFForOp::returningMatchAndRewrite(
     //   2b. Get the producer of the source (potentially walking through
     //   `iter_args` of nested `scf.for`)
     Optional<OpResult> fusableProducer =
-        getFusableProducer(candidateSliceOp.source());
+        getFusableProducer(candidateSliceOp.getSource());
     if (!fusableProducer)
       continue;
 
@@ -786,7 +786,7 @@ struct CanonicalizeCastExtentTensorOperandsPattern
           castOp.getType().cast<RankedTensorType>().isDynamicDim(0);
       if (isInformationLoosingCast) {
         anyChange = true;
-        return castOp.source();
+        return castOp.getSource();
       }
     }
     return operand;

@@ -1597,7 +1597,7 @@ struct ShapeOfCastExtentTensor : public OpRewritePattern<tensor::CastOp> {
     if (!ty || ty.getRank() != 1)
       return failure();
 
-    auto shapeOfOp = op.source().getDefiningOp<ShapeOfOp>();
+    auto shapeOfOp = op.getSource().getDefiningOp<ShapeOfOp>();
     if (!shapeOfOp)
       return failure();
 
@@ -209,8 +209,8 @@ static LogicalResult isMatchingWidth(Value result, unsigned width) {
 }
 
 LogicalResult ConvertOp::verify() {
-  if (auto tp1 = source().getType().dyn_cast<RankedTensorType>()) {
-    if (auto tp2 = dest().getType().dyn_cast<RankedTensorType>()) {
+  if (auto tp1 = getSource().getType().dyn_cast<RankedTensorType>()) {
+    if (auto tp2 = getDest().getType().dyn_cast<RankedTensorType>()) {
       if (tp1.getRank() != tp2.getRank())
         return emitError("unexpected conversion mismatch in rank");
       auto shape1 = tp1.getShape();

@@ -228,32 +228,32 @@ LogicalResult ConvertOp::verify() {
 }
 
 OpFoldResult ConvertOp::fold(ArrayRef<Attribute> operands) {
-  if (getType() == source().getType())
-    return source();
+  if (getType() == getSource().getType())
+    return getSource();
   return {};
 }
 
 LogicalResult ToPointersOp::verify() {
-  auto e = getSparseTensorEncoding(tensor().getType());
-  if (failed(isInBounds(dim(), tensor())))
+  auto e = getSparseTensorEncoding(getTensor().getType());
+  if (failed(isInBounds(getDim(), getTensor())))
     return emitError("requested pointers dimension out of bounds");
-  if (failed(isMatchingWidth(result(), e.getPointerBitWidth())))
+  if (failed(isMatchingWidth(getResult(), e.getPointerBitWidth())))
     return emitError("unexpected type for pointers");
   return success();
 }
 
 LogicalResult ToIndicesOp::verify() {
-  auto e = getSparseTensorEncoding(tensor().getType());
-  if (failed(isInBounds(dim(), tensor())))
+  auto e = getSparseTensorEncoding(getTensor().getType());
+  if (failed(isInBounds(getDim(), getTensor())))
     return emitError("requested indices dimension out of bounds");
-  if (failed(isMatchingWidth(result(), e.getIndexBitWidth())))
+  if (failed(isMatchingWidth(getResult(), e.getIndexBitWidth())))
     return emitError("unexpected type for indices");
   return success();
 }
 
 LogicalResult ToValuesOp::verify() {
-  RankedTensorType ttp = tensor().getType().cast<RankedTensorType>();
-  MemRefType mtp = result().getType().cast<MemRefType>();
+  RankedTensorType ttp = getTensor().getType().cast<RankedTensorType>();
+  MemRefType mtp = getResult().getType().cast<MemRefType>();
   if (ttp.getElementType() != mtp.getElementType())
     return emitError("unexpected mismatch in element types");
   return success();

@@ -292,12 +292,12 @@ static LogicalResult verifyNumBlockArgs(T *op, Region &region,
 
 LogicalResult BinaryOp::verify() {
   NamedAttrList attrs = (*this)->getAttrs();
-  Type leftType = x().getType();
-  Type rightType = y().getType();
-  Type outputType = output().getType();
-  Region &overlap = overlapRegion();
-  Region &left = leftRegion();
-  Region &right = rightRegion();
+  Type leftType = getX().getType();
+  Type rightType = getY().getType();
+  Type outputType = getOutput().getType();
+  Region &overlap = getOverlapRegion();
+  Region &left = getLeftRegion();
+  Region &right = getRightRegion();
 
   // Check correct number of block arguments and return type for each
   // non-empty region.

@@ -313,7 +313,7 @@ LogicalResult BinaryOp::verify() {
         verifyNumBlockArgs(this, left, "left", TypeRange{leftType}, outputType);
     if (failed(regionResult))
       return regionResult;
-  } else if (left_identity()) {
+  } else if (getLeftIdentity()) {
     if (leftType != outputType)
       return emitError("left=identity requires first argument to have the same "
                        "type as the output");

@@ -323,7 +323,7 @@ LogicalResult BinaryOp::verify() {
                            TypeRange{rightType}, outputType);
     if (failed(regionResult))
       return regionResult;
-  } else if (right_identity()) {
+  } else if (getRightIdentity()) {
    if (rightType != outputType)
      return emitError("right=identity requires second argument to have the "
                       "same type as the output");

@@ -333,20 +333,20 @@ LogicalResult BinaryOp::verify() {
 }
 
 LogicalResult UnaryOp::verify() {
-  Type inputType = x().getType();
-  Type outputType = output().getType();
+  Type inputType = getX().getType();
+  Type outputType = getOutput().getType();
   LogicalResult regionResult = success();
 
   // Check correct number of block arguments and return type for each
   // non-empty region.
-  Region &present = presentRegion();
+  Region &present = getPresentRegion();
   if (!present.empty()) {
     regionResult = verifyNumBlockArgs(this, present, "present",
                                       TypeRange{inputType}, outputType);
     if (failed(regionResult))
       return regionResult;
   }
-  Region &absent = absentRegion();
+  Region &absent = getAbsentRegion();
   if (!absent.empty()) {
     regionResult =
         verifyNumBlockArgs(this, absent, "absent", TypeRange{}, outputType);
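ConvertOp::fold above is the classic no-op cast fold: when the result type already equals the source type, the op folds away to its own operand. A self-contained mock of that decision, with invented types and `std::optional` playing the role of OpFoldResult:

#include <cassert>
#include <optional>
#include <string>

struct MockConvertOp {
  std::string sourceType, resultType;
  int source; // stand-in for the source Value
  std::optional<int> fold() const {
    if (resultType == sourceType)
      return source;     // mirrors `return getSource();`
    return std::nullopt; // mirrors `return {};` (no fold)
  }
};

int main() {
  assert(MockConvertOp{"csr", "csr", 7}.fold() == 7);
  assert(!MockConvertOp{"csr", "dense", 7}.fold().has_value());
}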
@@ -683,7 +683,7 @@ unsigned Merger::buildLattices(unsigned e, unsigned i) {
   {
     unsigned child0 = buildLattices(tensorExps[e].children.e0, i);
     UnaryOp unop = cast<UnaryOp>(tensorExps[e].op);
-    Region &absentRegion = unop.absentRegion();
+    Region &absentRegion = unop.getAbsentRegion();
 
     if (absentRegion.empty()) {
       // Simple mapping over existing values.

@@ -692,7 +692,7 @@ unsigned Merger::buildLattices(unsigned e, unsigned i) {
     // invariant on the right.
     Block &absentBlock = absentRegion.front();
     YieldOp absentYield = cast<YieldOp>(absentBlock.getTerminator());
-    Value absentVal = absentYield.result();
+    Value absentVal = absentYield.getResult();
     unsigned rhs = addExp(kInvariant, absentVal);
     return takeDisj(kind, child0, buildLattices(rhs, i), unop);
   }

@@ -773,8 +773,8 @@ unsigned Merger::buildLattices(unsigned e, unsigned i) {
     unsigned child0 = buildLattices(tensorExps[e].children.e0, i);
     unsigned child1 = buildLattices(tensorExps[e].children.e1, i);
     BinaryOp binop = cast<BinaryOp>(tensorExps[e].op);
-    Region &leftRegion = binop.leftRegion();
-    Region &rightRegion = binop.rightRegion();
+    Region &leftRegion = binop.getLeftRegion();
+    Region &rightRegion = binop.getRightRegion();
     // Left Region.
     Operation *leftYield = nullptr;
     if (!leftRegion.empty()) {

@@ -787,8 +787,8 @@ unsigned Merger::buildLattices(unsigned e, unsigned i) {
       Block &rightBlock = rightRegion.front();
       rightYield = rightBlock.getTerminator();
     }
-    bool includeLeft = binop.left_identity() || !leftRegion.empty();
-    bool includeRight = binop.right_identity() || !rightRegion.empty();
+    bool includeLeft = binop.getLeftIdentity() || !leftRegion.empty();
+    bool includeRight = binop.getRightIdentity() || !rightRegion.empty();
     return takeCombi(kBinary, child0, child1, binop, includeLeft,
                      kBinaryBranch, leftYield, includeRight, kBinaryBranch,
                      rightYield);

@@ -954,8 +954,8 @@ Optional<unsigned> Merger::buildTensorExp(linalg::GenericOp op, Value v) {
       if (isa<arith::BitcastOp>(def))
         return addExp(kBitCast, e, v);
       if (auto unop = dyn_cast<sparse_tensor::UnaryOp>(def)) {
-        if (isAdmissableBranch(unop, unop.presentRegion()) &&
-            isAdmissableBranch(unop, unop.absentRegion()))
+        if (isAdmissableBranch(unop, unop.getPresentRegion()) &&
+            isAdmissableBranch(unop, unop.getAbsentRegion()))
           return addExp(kUnary, e, Value(), def);
       }
     }

@@ -1008,11 +1008,11 @@ Optional<unsigned> Merger::buildTensorExp(linalg::GenericOp op, Value v) {
       if (isa<arith::ShLIOp>(def) && isInvariant(e1))
        return addExp(kShlI, e0, e1);
       if (auto binop = dyn_cast<sparse_tensor::BinaryOp>(def)) {
-        if (isAdmissableBranch(binop, binop.overlapRegion()) &&
-            (binop.left_identity() ||
-             isAdmissableBranch(binop, binop.leftRegion())) &&
-            (binop.right_identity() ||
-             isAdmissableBranch(binop, binop.rightRegion())))
+        if (isAdmissableBranch(binop, binop.getOverlapRegion()) &&
+            (binop.getLeftIdentity() ||
+             isAdmissableBranch(binop, binop.getLeftRegion())) &&
+            (binop.getRightIdentity() ||
+             isAdmissableBranch(binop, binop.getRightRegion())))
           return addExp(kBinary, e0, e1, Value(), def);
       }
     }

@@ -1032,7 +1032,7 @@ static Value insertYieldOp(RewriterBase &rewriter, Location loc, Region &region,
   // Merge cloned block and return yield value.
   Operation *placeholder = rewriter.create<arith::ConstantIndexOp>(loc, 0);
   rewriter.mergeBlockBefore(&tmpRegion.front(), placeholder, vals);
-  Value val = clonedYield.result();
+  Value val = clonedYield.getResult();
   rewriter.eraseOp(clonedYield);
   rewriter.eraseOp(placeholder);
   return val;

@@ -1044,7 +1044,7 @@ static Value buildUnaryPresent(RewriterBase &rewriter, Location loc,
     // Empty input value must be propagated.
     return Value();
   UnaryOp unop = cast<UnaryOp>(op);
-  Region &presentRegion = unop.presentRegion();
+  Region &presentRegion = unop.getPresentRegion();
   if (presentRegion.empty())
     // Uninitialized Value() will be interpreted as missing data in the
     // output.

@@ -1058,7 +1058,7 @@ static Value buildBinaryOverlap(RewriterBase &rewriter, Location loc,
     // Empty input values must be propagated.
     return Value();
   BinaryOp binop = cast<BinaryOp>(op);
-  Region &overlapRegion = binop.overlapRegion();
+  Region &overlapRegion = binop.getOverlapRegion();
   if (overlapRegion.empty())
     // Uninitialized Value() will be interpreted as missing data in the
     // output.
@@ -154,7 +154,7 @@ struct ReifyExpandOrCollapseShapeOp
     auto loc = op->getLoc();
     auto reshapeOp = cast<OpTy>(op);
     auto resultShape = getReshapeOutputShapeFromInputShape(
-        b, loc, reshapeOp.src(), reshapeOp.getResultType().getShape(),
+        b, loc, reshapeOp.getSrc(), reshapeOp.getResultType().getShape(),
         reshapeOp.getReassociationMaps());
     reifiedReturnShapes.push_back(getAsValues(b, loc, resultShape));
     return success();

@@ -178,7 +178,7 @@ struct ReifyPadOp
       // Shape along each dimension is source dim + low pad + high pad.
       SmallVector<Value> mapOperands;
       mapOperands.push_back(
-          b.createOrFold<tensor::DimOp>(loc, padOp.source(), dim));
+          b.createOrFold<tensor::DimOp>(loc, padOp.getSource(), dim));
       AffineExpr expr = b.getAffineDimExpr(0);
       unsigned numSymbols = 0;
       auto addOpFoldResult = [&](OpFoldResult valueOrAttr) {
@@ -141,7 +141,7 @@ Operation *tensor::bubbleUpPadSlice(OpBuilder &b, tensor::PadOp padOp,
     bool hasHighPad = getConstantIntValue(high) != static_cast<int64_t>(0);
     auto offset = getValueOrCreateConstantIndexOp(b, loc, offsets[dim]);
     auto length = getValueOrCreateConstantIndexOp(b, loc, sizes[dim]);
-    auto srcSize = b.createOrFold<tensor::DimOp>(loc, padOp.source(), dim);
+    auto srcSize = b.createOrFold<tensor::DimOp>(loc, padOp.getSource(), dim);
 
     // The new amount of low padding is `low - offset`. Except for the case
     // where none of the low padding is read. In that case, the new amount of

@@ -246,13 +246,13 @@ Operation *tensor::bubbleUpPadSlice(OpBuilder &b, tensor::PadOp padOp,
   auto createPadOfExtractSlice = [&]() {
     // Create pad(extract_slice(x)).
     auto newSliceOp = b.create<tensor::ExtractSliceOp>(
-        loc, padOp.source(), newOffsets, newLengths, newStrides);
+        loc, padOp.getSource(), newOffsets, newLengths, newStrides);
     auto newPadOp = b.create<PadOp>(loc, newSliceOp, staticNewLows,
                                     staticNewHighs, newLows, newHighs);
 
     // Copy region to new PadOp.
     BlockAndValueMapping bvm;
-    padOp.region().cloneInto(&newPadOp.getRegion(), bvm);
+    padOp.getRegion().cloneInto(&newPadOp.getRegion(), bvm);
 
     // Cast result and return.
     return castResult(newPadOp);
@@ -27,7 +27,7 @@ PadOp mlir::tensor::createPadScalarOp(Type type, Value source, Value pad,
   int rank = padTensorOp.getResultType().getRank();
   SmallVector<Type> blockArgTypes(rank, builder.getIndexType());
   SmallVector<Location> blockArgLocs(rank, loc);
-  auto &region = padTensorOp.region();
+  auto &region = padTensorOp.getRegion();
   // `builder.createBlock` changes the insertion point within the block. Create
   // a guard to reset the insertion point of the builder after it is destroyed.
   OpBuilder::InsertionGuard guard(builder);
@@ -3167,7 +3167,7 @@ public:
     }
     SmallVector<bool> inBounds(xferOp.getTransferRank(), true);
     rewriter.replaceOpWithNewOp<TransferReadOp>(
-        xferOp, xferOp.getVectorType(), extractOp.source(), newIndices,
+        xferOp, xferOp.getVectorType(), extractOp.getSource(), newIndices,
         xferOp.getPadding(), ArrayRef<bool>{inBounds});
 
     return success();

@@ -3520,7 +3520,7 @@ public:
     if (!insertOp.hasUnitStride())
       return failure();
 
-    auto xferOp = insertOp.source().getDefiningOp<TransferWriteOp>();
+    auto xferOp = insertOp.getSource().getDefiningOp<TransferWriteOp>();
     if (!xferOp)
       return failure();
     // TODO: support 0-d corner case.

@@ -3575,7 +3575,7 @@ public:
         rewriter, insertOp.getLoc(), insertOp.getMixedOffsets());
     SmallVector<bool> inBounds(xferOp.getTransferRank(), true);
     rewriter.replaceOpWithNewOp<TransferWriteOp>(insertOp, xferOp.getVector(),
-                                                 insertOp.dest(), indices,
+                                                 insertOp.getDest(), indices,
                                                  ArrayRef<bool>{inBounds});
     return success();
   }

@@ -3613,10 +3613,11 @@ public:
                                 PatternRewriter &rewriter) const override {
     if (!insertOp.hasUnitStride())
       return failure();
-    auto extractOp = insertOp.source().getDefiningOp<tensor::ExtractSliceOp>();
+    auto extractOp =
+        insertOp.getSource().getDefiningOp<tensor::ExtractSliceOp>();
     if (!extractOp || !extractOp.hasUnitStride() || !extractOp->hasOneUse())
       return failure();
-    auto transferOp = extractOp.source().getDefiningOp<TransferWriteOp>();
+    auto transferOp = extractOp.getSource().getDefiningOp<TransferWriteOp>();
     if (!transferOp || !transferOp->hasOneUse())
       return failure();
 

@@ -3668,7 +3669,7 @@ public:
     for (const auto &en : enumerate(newResultShape))
       newInBounds.push_back(en.value() == vectorShape[en.index()]);
     auto newExtractOp = rewriter.create<tensor::ExtractSliceOp>(
-        extractOp.getLoc(), insertOp.getSourceType(), insertOp.dest(),
+        extractOp.getLoc(), insertOp.getSourceType(), insertOp.getDest(),
        insertOp.getMixedOffsets(), insertOp.getMixedSizes(),
        insertOp.getMixedStrides());
     auto newTransferWriteOp = rewriter.create<TransferWriteOp>(

@@ -3676,7 +3677,7 @@ public:
         transferOp.getIndices(), transferOp.getPermutationMapAttr(),
         rewriter.getBoolArrayAttr(newInBounds));
     rewriter.updateRootInPlace(insertOp, [&]() {
-      insertOp.sourceMutable().assign(newTransferWriteOp.getResult());
+      insertOp.getSourceMutable().assign(newTransferWriteOp.getResult());
     });
     return success();
   }
@@ -128,7 +128,7 @@ struct TestLinalgElementwiseFusion
         [](const OpResult &producer, OpOperand &consumer) {
           if (auto collapseOp =
                   producer.getDefiningOp<tensor::CollapseShapeOp>()) {
-            if (!collapseOp.src().getDefiningOp<linalg::LinalgOp>()) {
+            if (!collapseOp.getSrc().getDefiningOp<linalg::LinalgOp>()) {
              return false;
            }
          }
@@ -62,10 +62,10 @@ static void applyFoldConstantExtractSlicePatterns(Operation *rootOp) {
   RewritePatternSet patterns(rootOp->getContext());
   tensor::ControlConstantExtractSliceFusionFn controlFn =
       [](tensor::ExtractSliceOp op) {
-        if (!op.source().hasOneUse())
+        if (!op.getSource().hasOneUse())
          return false;
 
-        auto resultType = op.result().getType().cast<ShapedType>();
+        auto resultType = op.getResult().getType().cast<ShapedType>();
         constexpr int64_t kConstantFoldingMaxNumElements = 1024;
         return resultType.getNumElements() <= kConstantFoldingMaxNumElements;
       };
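The control function above gates constant folding of extract_slice on two conditions: the source constant must have a single use, and the folded result must stay small. The same predicate as a standalone mock (the struct is invented; the 1024-element limit is copied from the diff):

#include <cassert>
#include <cstdint>

struct MockExtractSliceOp {
  int sourceUses;            // how many users the source constant has
  int64_t resultNumElements; // elements in the folded result
};

static bool shouldFold(const MockExtractSliceOp &op) {
  if (op.sourceUses != 1) // avoid duplicating a shared constant
    return false;
  constexpr int64_t kConstantFoldingMaxNumElements = 1024;
  return op.resultNumElements <= kConstantFoldingMaxNumElements;
}

int main() {
  assert(shouldFold({1, 512}));   // single use, small result: fold
  assert(!shouldFold({2, 512}));  // shared source: don't duplicate
  assert(!shouldFold({1, 4096})); // result too large: keep the slice
}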