Replace usages of "Op::operator->" with ".".

This is step 2/N of removing the temporary operator-> method as part of the de-const transition.

PiperOrigin-RevId: 240200792
River Riddle 2019-03-25 13:02:06 -07:00 committed by jpienaar
parent c8a311a788
commit 96ebde9cfd
20 changed files with 172 additions and 174 deletions
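For context, the sketch below shows the shape of the API being migrated. This is not the actual MLIR source: Instruction, Op, and LoadOp are simplified stand-ins, and getMemRefRank is a hypothetical accessor. It assumes the temporary operator-> simply forwarded to the op object itself, so that the pointer-style spelling (op->foo()) and the value-style spelling (op.foo()) resolved to the same member during the migration:

#include <cassert>

// Hypothetical, simplified stand-ins -- not the real MLIR declarations.
struct Instruction {};

// Op classes are lightweight value types wrapping an Instruction*, so member
// access naturally uses ".". The temporary operator-> below only forwards to
// the object itself so that legacy pointer-style call sites keep compiling
// while they are migrated.
template <typename ConcreteType> class Op {
public:
  explicit Op(Instruction *inst) : instruction(inst) {}

  // Temporary bridge slated for removal once every call site uses ".".
  ConcreteType *operator->() { return static_cast<ConcreteType *>(this); }

  Instruction *getInstruction() { return instruction; }

private:
  Instruction *instruction;
};

struct LoadOp : Op<LoadOp> {
  using Op::Op;
  int getMemRefRank() { return 2; } // hypothetical accessor
};

int main() {
  Instruction inst;
  LoadOp loadOp(&inst);
  // Old spelling (being removed) and new spelling hit the same member.
  assert(loadOp->getMemRefRank() == loadOp.getMemRefRank());
  assert(loadOp.getInstruction() == &inst);
}

Under that assumption, every hunk below is behavior-preserving: each one only switches call sites from the arrow spelling to the dot spelling so that the temporary operator-> can be deleted in a later step.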


@@ -421,14 +421,14 @@ InstructionHandle InstructionHandle::create(Args... args) {
   return InstructionHandle(
       ScopedContext::getBuilder()
           ->create<Op>(ScopedContext::getLocation(), args...)
-          ->getInstruction());
+          .getInstruction());
 }
 template <typename Op, typename... Args>
 ValueHandle ValueHandle::create(Args... args) {
   Instruction *inst = ScopedContext::getBuilder()
                           ->create<Op>(ScopedContext::getLocation(), args...)
-                          ->getInstruction();
+                          .getInstruction();
   if (inst->getNumResults() == 1) {
     return ValueHandle(inst->getResult(0));
   } else if (inst->getNumResults() == 0) {


@@ -171,8 +171,8 @@ public:
   static LogicalResult constantFoldHook(Instruction *op,
                                         ArrayRef<Attribute> operands,
                                         SmallVectorImpl<Attribute> &results) {
-    return op->cast<ConcreteType>()->constantFold(operands, results,
-                                                  op->getContext());
+    return op->cast<ConcreteType>().constantFold(operands, results,
+                                                 op->getContext());
   }
   /// Op implementations can implement this hook. It should attempt to constant
@@ -193,7 +193,7 @@ public:
   /// This is an implementation detail of the folder hook for AbstractOperation.
   static LogicalResult foldHook(Instruction *op,
                                 SmallVectorImpl<Value *> &results) {
-    return op->cast<ConcreteType>()->fold(results);
+    return op->cast<ConcreteType>().fold(results);
   }
   /// This hook implements a generalized folder for this operation. Operations
@@ -241,7 +241,7 @@ public:
                                        ArrayRef<Attribute> operands,
                                        SmallVectorImpl<Attribute> &results) {
     auto result =
-        op->cast<ConcreteType>()->constantFold(operands, op->getContext());
+        op->cast<ConcreteType>().constantFold(operands, op->getContext());
     if (!result)
       return failure();
@@ -265,7 +265,7 @@ public:
   /// This is an implementation detail of the folder hook for AbstractOperation.
   static LogicalResult foldHook(Instruction *op,
                                 SmallVectorImpl<Value *> &results) {
-    auto *result = op->cast<ConcreteType>()->fold();
+    auto *result = op->cast<ConcreteType>().fold();
     if (!result)
       return failure();
     if (result != op->getResult(0))
@@ -752,7 +752,7 @@ public:
     auto opPointer = op->dyn_cast<ConcreteType>();
     assert(opPointer &&
            "op's name does not match name of concrete type instantiated with");
-    opPointer->print(p);
+    opPointer.print(p);
   }
   /// This is the hook that checks whether or not this instruction is well
@@ -764,7 +764,7 @@ public:
   /// diagnostic subsystem and returns true.
   static bool verifyInvariants(Instruction *op) {
     return BaseVerifier<Traits<ConcreteType>...>::verifyTrait(op) ||
-           op->cast<ConcreteType>()->verify();
+           op->cast<ConcreteType>().verify();
   }
   // Returns the properties of an operation by combining the properties of the


@@ -256,7 +256,7 @@ public:
   template <typename OpTy, typename... Args>
   void replaceOpWithNewOp(Instruction *op, Args... args) {
     auto newOp = create<OpTy>(op->getLoc(), args...);
-    replaceOpWithResultsOfAnotherOp(op, newOp->getInstruction(), {});
+    replaceOpWithResultsOfAnotherOp(op, newOp.getInstruction(), {});
   }
   /// Replaces the result op with a new op that is created without verification.
@@ -267,7 +267,7 @@ public:
                            ArrayRef<Value *> valuesToRemoveIfDead,
                            Args... args) {
     auto newOp = create<OpTy>(op->getLoc(), args...);
-    replaceOpWithResultsOfAnotherOp(op, newOp->getInstruction(),
+    replaceOpWithResultsOfAnotherOp(op, newOp.getInstruction(),
                                     valuesToRemoveIfDead);
   }


@@ -66,7 +66,7 @@ bool mlir::isValidDim(Value *value) {
     // The dim op is okay if its operand memref/tensor is defined at the top
     // level.
     if (auto dimOp = inst->dyn_cast<DimOp>())
-      return isTopLevelSymbol(dimOp->getOperand());
+      return isTopLevelSymbol(dimOp.getOperand());
     return false;
   }
   // This value is a block argument (which also includes 'affine.for' loop IVs).
@@ -87,11 +87,11 @@ bool mlir::isValidSymbol(Value *value) {
       return true;
     // Affine apply operation is ok if all of its operands are ok.
     if (auto op = inst->dyn_cast<AffineApplyOp>())
-      return op->isValidSymbol();
+      return op.isValidSymbol();
     // The dim op is okay if its operand memref/tensor is defined at the top
     // level.
     if (auto dimOp = inst->dyn_cast<DimOp>())
-      return isTopLevelSymbol(dimOp->getOperand());
+      return isTopLevelSymbol(dimOp.getOperand());
     return false;
   }
   // Otherwise, the only valid symbol is a top level block argument.


@@ -235,9 +235,9 @@ static bool isContiguousAccess(Value &iv, LoadOrStoreOp memoryOp,
   static_assert(std::is_same<LoadOrStoreOp, LoadOp>::value ||
                     std::is_same<LoadOrStoreOp, StoreOp>::value,
                 "Must be called on either const LoadOp & or const StoreOp &");
-  auto memRefType = memoryOp->getMemRefType();
+  auto memRefType = memoryOp.getMemRefType();
   if (fastestVaryingDim >= memRefType.getRank()) {
-    memoryOp->emitError("fastest varying dim out of bounds");
+    memoryOp.emitError("fastest varying dim out of bounds");
     return false;
   }
@@ -249,10 +249,10 @@ static bool isContiguousAccess(Value &iv, LoadOrStoreOp memoryOp,
       (layoutMap.size() == 1 &&
        !(layoutMap[0] ==
          b.getMultiDimIdentityMap(layoutMap[0].getNumDims())))) {
-    return memoryOp->emitError("NYI: non-trivial layoutMap"), false;
+    return memoryOp.emitError("NYI: non-trivial layoutMap"), false;
   }
-  auto indices = memoryOp->getIndices();
+  auto indices = memoryOp.getIndices();
   auto numIndices = llvm::size(indices);
   unsigned d = 0;
   for (auto index : indices) {
@@ -268,7 +268,7 @@ static bool isContiguousAccess(Value &iv, LoadOrStoreOp memoryOp,
 template <typename LoadOrStoreOpPointer>
 static bool isVectorElement(LoadOrStoreOpPointer memoryOp) {
-  auto memRefType = memoryOp->getMemRefType();
+  auto memRefType = memoryOp.getMemRefType();
   return memRefType.getElementType().template isa<VectorType>();
 }


@@ -384,7 +384,7 @@ LogicalResult mlir::boundCheckLoadOrStoreOp(LoadOrStoreOpPointer loadOrStoreOp,
   LLVM_DEBUG(region.getConstraints()->dump());
   bool outOfBounds = false;
-  unsigned rank = loadOrStoreOp->getMemRefType().getRank();
+  unsigned rank = loadOrStoreOp.getMemRefType().getRank();
   // For each dimension, check for out of bounds.
   for (unsigned r = 0; r < rank; r++) {
@@ -394,7 +394,7 @@ LogicalResult mlir::boundCheckLoadOrStoreOp(LoadOrStoreOpPointer loadOrStoreOp,
     // of upper and out of lower), and check if the constraint system is
     // feasible. If it is, there is at least one point out of bounds.
     SmallVector<int64_t, 4> ineq(rank + 1, 0);
-    int64_t dimSize = loadOrStoreOp->getMemRefType().getDimSize(r);
+    int64_t dimSize = loadOrStoreOp.getMemRefType().getDimSize(r);
     // TODO(bondhugula): handle dynamic dim sizes.
     if (dimSize == -1)
       continue;
@@ -403,7 +403,7 @@ LogicalResult mlir::boundCheckLoadOrStoreOp(LoadOrStoreOpPointer loadOrStoreOp,
     ucst.addConstantLowerBound(r, dimSize);
     outOfBounds = !ucst.isEmpty();
     if (outOfBounds && emitError) {
-      loadOrStoreOp->emitOpError(
+      loadOrStoreOp.emitOpError(
           "memref out of upper bound access along dimension #" + Twine(r + 1));
     }
@@ -414,7 +414,7 @@ LogicalResult mlir::boundCheckLoadOrStoreOp(LoadOrStoreOpPointer loadOrStoreOp,
     lcst.addConstantUpperBound(r, -1);
     outOfBounds = !lcst.isEmpty();
     if (outOfBounds && emitError) {
-      loadOrStoreOp->emitOpError(
+      loadOrStoreOp.emitOpError(
           "memref out of lower bound access along dimension #" + Twine(r + 1));
     }
   }
@@ -622,21 +622,21 @@ AffineForOp mlir::insertBackwardComputationSlice(
 // opinst from 'loadOrStoreOpInst'.
 MemRefAccess::MemRefAccess(Instruction *loadOrStoreOpInst) {
   if (auto loadOp = loadOrStoreOpInst->dyn_cast<LoadOp>()) {
-    memref = loadOp->getMemRef();
+    memref = loadOp.getMemRef();
     opInst = loadOrStoreOpInst;
-    auto loadMemrefType = loadOp->getMemRefType();
+    auto loadMemrefType = loadOp.getMemRefType();
     indices.reserve(loadMemrefType.getRank());
-    for (auto *index : loadOp->getIndices()) {
+    for (auto *index : loadOp.getIndices()) {
       indices.push_back(index);
     }
   } else {
     assert(loadOrStoreOpInst->isa<StoreOp>() && "load/store op expected");
     auto storeOp = loadOrStoreOpInst->dyn_cast<StoreOp>();
     opInst = loadOrStoreOpInst;
-    memref = storeOp->getMemRef();
-    auto storeMemrefType = storeOp->getMemRefType();
+    memref = storeOp.getMemRef();
+    auto storeMemrefType = storeOp.getMemRefType();
     indices.reserve(storeMemrefType.getRank());
-    for (auto *index : storeOp->getIndices()) {
+    for (auto *index : storeOp.getIndices()) {
       indices.push_back(index);
     }
   }


@@ -171,12 +171,12 @@ AffineMap mlir::makePermutationMap(
   }
   if (auto load = opInst->dyn_cast<LoadOp>()) {
-    return ::makePermutationMap(opInst->getContext(), load->getIndices(),
+    return ::makePermutationMap(opInst->getContext(), load.getIndices(),
                                 enclosingLoopToVectorDim);
   }
   auto store = opInst->cast<StoreOp>();
-  return ::makePermutationMap(opInst->getContext(), store->getIndices(),
+  return ::makePermutationMap(opInst->getContext(), store.getIndices(),
                               enclosingLoopToVectorDim);
 }
@@ -194,10 +194,10 @@ bool mlir::matcher::operatesOnSuperVectors(Instruction &opInst,
   bool mustDivide = false;
   VectorType superVectorType;
   if (auto read = opInst.dyn_cast<VectorTransferReadOp>()) {
-    superVectorType = read->getResultType();
+    superVectorType = read.getResultType();
     mustDivide = true;
   } else if (auto write = opInst.dyn_cast<VectorTransferWriteOp>()) {
-    superVectorType = write->getVectorType();
+    superVectorType = write.getVectorType();
     mustDivide = true;
   } else if (opInst.getNumResults() == 0) {
     if (!opInst.isa<ReturnOp>()) {


@@ -69,7 +69,7 @@ MLIRContext *mlir::edsc::ScopedContext::getContext() {
 mlir::edsc::ValueHandle::ValueHandle(index_t cst) {
   auto *b = ScopedContext::getBuilder();
   auto loc = ScopedContext::getLocation();
-  v = b->create<ConstantIndexOp>(loc, cst.v)->getResult();
+  v = b->create<ConstantIndexOp>(loc, cst.v).getResult();
   t = v->getType();
 }
@@ -393,7 +393,7 @@ static ValueHandle createComparisonExpr(CmpIPredicate predicate,
   auto op = ScopedContext::getBuilder()->create<CmpIOp>(
       ScopedContext::getLocation(), predicate, lhs.getValue(), rhs.getValue());
-  return ValueHandle(op->getResult());
+  return ValueHandle(op.getResult());
 }
 ValueHandle mlir::edsc::op::operator==(ValueHandle lhs, ValueHandle rhs) {


@@ -450,7 +450,7 @@ struct OneToOneLLVMOpLowering : public LLVMLegalizationPattern<SourceOp> {
     if (numResults == 0)
       return {};
     if (numResults == 1)
-      return {newOp->getInstruction()->getResult(0)};
+      return {newOp.getInstruction()->getResult(0)};
     // Otherwise, it had been converted to an operation producing a structure.
     // Extract individual results from the structure and return them as list.
@@ -460,7 +460,7 @@ struct OneToOneLLVMOpLowering : public LLVMLegalizationPattern<SourceOp> {
       auto type = TypeConverter::convert(op->getResult(i)->getType(),
                                          this->dialect.getLLVMModule());
       results.push_back(rewriter.create<LLVM::ExtractValueOp>(
-          op->getLoc(), type, newOp->getInstruction()->getResult(0),
+          op->getLoc(), type, newOp.getInstruction()->getResult(0),
           this->getIntegerArrayAttr(rewriter, i)));
     }
     return results;
@@ -546,19 +546,19 @@ struct AllocOpLowering : public LLVMLegalizationPattern<AllocOp> {
     if (!LLVMLegalizationPattern<AllocOp>::match(op))
       return matchFailure();
     auto allocOp = op->cast<AllocOp>();
-    MemRefType type = allocOp->getType();
+    MemRefType type = allocOp.getType();
     return isSupportedMemRefType(type) ? matchSuccess() : matchFailure();
   }
   SmallVector<Value *, 4> rewrite(Instruction *op, ArrayRef<Value *> operands,
                                   FuncBuilder &rewriter) const override {
     auto allocOp = op->cast<AllocOp>();
-    MemRefType type = allocOp->getType();
+    MemRefType type = allocOp.getType();
     // Get actual sizes of the memref as values: static sizes are constant
     // values and dynamic sizes are passed to 'alloc' as operands.
     SmallVector<Value *, 4> sizes;
-    auto numOperands = allocOp->getNumOperands();
+    auto numOperands = allocOp.getNumOperands();
     sizes.reserve(numOperands);
     unsigned i = 0;
     for (int64_t s : type.getShape())
@@ -607,7 +607,7 @@ struct AllocOpLowering : public LLVMLegalizationPattern<AllocOp> {
             .create<LLVM::CallOp>(op->getLoc(), getVoidPtrType(),
                                   rewriter.getFunctionAttr(mallocFunc),
                                   cumulativeSize)
-            ->getResult(0);
+            .getResult(0);
     auto structElementType = TypeConverter::convert(elementType, getModule());
     auto elementPtrType = LLVM::LLVMType::get(
         op->getContext(), structElementType.cast<LLVM::LLVMType>()
@@ -688,8 +688,8 @@ struct MemRefCastOpLowering : public LLVMLegalizationPattern<MemRefCastOp> {
       return matchFailure();
     auto memRefCastOp = op->cast<MemRefCastOp>();
     MemRefType sourceType =
-        memRefCastOp->getOperand()->getType().cast<MemRefType>();
-    MemRefType targetType = memRefCastOp->getType();
+        memRefCastOp.getOperand()->getType().cast<MemRefType>();
+    MemRefType targetType = memRefCastOp.getType();
     return (isSupportedMemRefType(targetType) &&
             isSupportedMemRefType(sourceType))
                ? matchSuccess()
@@ -699,8 +699,8 @@ struct MemRefCastOpLowering : public LLVMLegalizationPattern<MemRefCastOp> {
   SmallVector<Value *, 4> rewrite(Instruction *op, ArrayRef<Value *> operands,
                                   FuncBuilder &rewriter) const override {
     auto memRefCastOp = op->cast<MemRefCastOp>();
-    auto targetType = memRefCastOp->getType();
-    auto sourceType = memRefCastOp->getOperand()->getType().cast<MemRefType>();
+    auto targetType = memRefCastOp.getType();
+    auto sourceType = memRefCastOp.getOperand()->getType().cast<MemRefType>();
     // Copy the data buffer pointer.
     auto elementTypePtr =
@@ -767,7 +767,7 @@ struct DimOpLowering : public LLVMLegalizationPattern<DimOp> {
     if (!LLVMLegalizationPattern<DimOp>::match(op))
       return this->matchFailure();
     auto dimOp = op->cast<DimOp>();
-    MemRefType type = dimOp->getOperand()->getType().cast<MemRefType>();
+    MemRefType type = dimOp.getOperand()->getType().cast<MemRefType>();
     return isSupportedMemRefType(type) ? matchSuccess() : matchFailure();
   }
@@ -775,11 +775,11 @@ struct DimOpLowering : public LLVMLegalizationPattern<DimOp> {
                                   FuncBuilder &rewriter) const override {
     assert(operands.size() == 1 && "expected exactly one operand");
     auto dimOp = op->cast<DimOp>();
-    MemRefType type = dimOp->getOperand()->getType().cast<MemRefType>();
+    MemRefType type = dimOp.getOperand()->getType().cast<MemRefType>();
     SmallVector<Value *, 4> results;
     auto shape = type.getShape();
-    uint64_t index = dimOp->getIndex();
+    uint64_t index = dimOp.getIndex();
     // Extract dynamic size from the memref descriptor and define static size
     // as a constant.
     if (shape[index] == -1) {
@@ -814,7 +814,7 @@ struct LoadStoreOpLowering : public LLVMLegalizationPattern<Derived> {
     if (!LLVMLegalizationPattern<Derived>::match(op))
       return this->matchFailure();
     auto loadOp = op->cast<Derived>();
-    MemRefType type = loadOp->getMemRefType();
+    MemRefType type = loadOp.getMemRefType();
     return isSupportedMemRefType(type) ? this->matchSuccess()
                                        : this->matchFailure();
   }
@@ -918,7 +918,7 @@ struct LoadOpLowering : public LoadStoreOpLowering<LoadOp> {
   SmallVector<Value *, 4> rewrite(Instruction *op, ArrayRef<Value *> operands,
                                   FuncBuilder &rewriter) const override {
     auto loadOp = op->cast<LoadOp>();
-    auto type = loadOp->getMemRefType();
+    auto type = loadOp.getMemRefType();
     Value *dataPtr = getDataPtr(op->getLoc(), type, operands.front(),
                                 operands.drop_front(), rewriter, getModule());
@@ -940,7 +940,7 @@ struct StoreOpLowering : public LoadStoreOpLowering<StoreOp> {
   SmallVector<Value *, 4> rewrite(Instruction *op, ArrayRef<Value *> operands,
                                   FuncBuilder &rewriter) const override {
     auto storeOp = op->cast<StoreOp>();
-    auto type = storeOp->getMemRefType();
+    auto type = storeOp.getMemRefType();
     Value *dataPtr = getDataPtr(op->getLoc(), type, operands[1],
                                 operands.drop_front(2), rewriter, getModule());


@@ -135,7 +135,7 @@ struct MemRefCastFolder : public RewritePattern {
     for (unsigned i = 0, e = op->getNumOperands(); i != e; ++i)
       if (auto *memref = op->getOperand(i)->getDefiningInst())
         if (auto cast = memref->dyn_cast<MemRefCastOp>())
-          op->setOperand(i, cast->getOperand());
+          op->setOperand(i, cast.getOperand());
     rewriter.updatedRootInPlace(op);
   }
 };
@@ -293,7 +293,7 @@ struct SimplifyAllocConst : public RewritePattern {
     // Check to see if any dimensions operands are constants. If so, we can
     // substitute and drop them.
-    for (auto *operand : alloc->getOperands())
+    for (auto *operand : alloc.getOperands())
       if (matchPattern(operand, m_ConstantIndex()))
         return matchSuccess();
     return matchFailure();
@@ -301,7 +301,7 @@ struct SimplifyAllocConst : public RewritePattern {
   void rewrite(Instruction *op, PatternRewriter &rewriter) const override {
     auto allocOp = op->cast<AllocOp>();
-    auto memrefType = allocOp->getType();
+    auto memrefType = allocOp.getType();
     // Ok, we have one or more constant operands. Collect the non-constant ones
     // and keep track of the resultant memref type to build.
@@ -318,7 +318,7 @@ struct SimplifyAllocConst : public RewritePattern {
         newShapeConstants.push_back(dimSize);
         continue;
       }
-      auto *defOp = allocOp->getOperand(dynamicDimPos)->getDefiningInst();
+      auto *defOp = allocOp.getOperand(dynamicDimPos)->getDefiningInst();
       ConstantIndexOp constantIndexOp;
       if (defOp && (constantIndexOp = defOp->dyn_cast<ConstantIndexOp>())) {
         // Dynamic shape dimension will be folded.
@@ -328,7 +328,7 @@ struct SimplifyAllocConst : public RewritePattern {
       } else {
         // Dynamic shape dimension not folded; copy operand from old memref.
        newShapeConstants.push_back(-1);
-        newOperands.push_back(allocOp->getOperand(dynamicDimPos));
+        newOperands.push_back(allocOp.getOperand(dynamicDimPos));
       }
       dynamicDimPos++;
     }
@@ -341,10 +341,10 @@ struct SimplifyAllocConst : public RewritePattern {
     // Create and insert the alloc op for the new memref.
     auto newAlloc =
-        rewriter.create<AllocOp>(allocOp->getLoc(), newMemRefType, newOperands);
+        rewriter.create<AllocOp>(allocOp.getLoc(), newMemRefType, newOperands);
     // Insert a cast so we have the same type as the old alloc.
-    auto resultCast = rewriter.create<MemRefCastOp>(allocOp->getLoc(), newAlloc,
-                                                    allocOp->getType());
+    auto resultCast = rewriter.create<MemRefCastOp>(allocOp.getLoc(), newAlloc,
+                                                    allocOp.getType());
     rewriter.replaceOp(op, {resultCast}, droppedOperands);
   }
@@ -360,7 +360,7 @@ struct SimplifyDeadAlloc : public RewritePattern {
                                      PatternRewriter &rewriter) const override {
     // Check if the alloc'ed value has any uses.
     auto alloc = op->cast<AllocOp>();
-    if (!alloc->use_empty())
+    if (!alloc.use_empty())
       return matchFailure();
     // If it doesn't, we can eliminate it.
@@ -493,7 +493,7 @@ struct SimplifyIndirectCallWithKnownCallee : public RewritePattern {
     // Check that the callee is a constant operation.
     Attribute callee;
-    if (!matchPattern(indirectCall->getCallee(), m_Constant(&callee)))
+    if (!matchPattern(indirectCall.getCallee(), m_Constant(&callee)))
       return matchFailure();
     // Check that the constant callee is a function.
@@ -502,7 +502,7 @@ struct SimplifyIndirectCallWithKnownCallee : public RewritePattern {
       return matchFailure();
     // Replace with a direct call.
-    SmallVector<Value *, 8> callOperands(indirectCall->getArgOperands());
+    SmallVector<Value *, 8> callOperands(indirectCall.getArgOperands());
     rewriter.replaceOpWithNewOp<CallOp>(op, calledFn.getValue(), callOperands);
     return matchSuccess();
   }
@@ -803,7 +803,7 @@ struct SimplifyConstCondBranchPred : public RewritePattern {
     auto condbr = op->cast<CondBranchOp>();
     // Check that the condition is a constant.
-    if (!matchPattern(condbr->getCondition(), m_Op<ConstantOp>()))
+    if (!matchPattern(condbr.getCondition(), m_Op<ConstantOp>()))
       return matchFailure();
     Block *foldedDest;
@@ -812,14 +812,13 @@ struct SimplifyConstCondBranchPred : public RewritePattern {
     // If the condition is known to evaluate to false we fold to a branch to the
     // false destination. Otherwise, we fold to a branch to the true
     // destination.
-    if (matchPattern(condbr->getCondition(), m_Zero())) {
-      foldedDest = condbr->getFalseDest();
-      branchArgs.assign(condbr->false_operand_begin(),
-                        condbr->false_operand_end());
+    if (matchPattern(condbr.getCondition(), m_Zero())) {
+      foldedDest = condbr.getFalseDest();
+      branchArgs.assign(condbr.false_operand_begin(),
+                        condbr.false_operand_end());
     } else {
-      foldedDest = condbr->getTrueDest();
-      branchArgs.assign(condbr->true_operand_begin(),
-                        condbr->true_operand_end());
+      foldedDest = condbr.getTrueDest();
+      branchArgs.assign(condbr.true_operand_begin(), condbr.true_operand_end());
     }
     rewriter.replaceOpWithNewOp<BranchOp>(op, foldedDest, branchArgs);
@@ -1095,7 +1094,7 @@ struct SimplifyDeadDealloc : public RewritePattern {
     auto dealloc = op->cast<DeallocOp>();
     // Check that the memref operand's defining instruction is an AllocOp.
-    Value *memref = dealloc->getMemRef();
+    Value *memref = dealloc.getMemRef();
     Instruction *defOp = memref->getDefiningInst();
     if (!defOp || !defOp->isa<AllocOp>())
       return matchFailure();
@@ -1986,15 +1985,15 @@ namespace {
 ///
 struct SimplifyXMinusX : public RewritePattern {
   SimplifyXMinusX(MLIRContext *context)
-      : RewritePattern(SubIOp::getOperationName(), 10, context) {}
+      : RewritePattern(SubIOp::getOperationName(), 1, context) {}
   PatternMatchResult matchAndRewrite(Instruction *op,
                                      PatternRewriter &rewriter) const override {
     auto subi = op->cast<SubIOp>();
-    if (subi->getOperand(0) != subi->getOperand(1))
+    if (subi.getOperand(0) != subi.getOperand(1))
      return matchFailure();
-    rewriter.replaceOpWithNewOp<ConstantIntOp>(op, 0, subi->getType());
+    rewriter.replaceOpWithNewOp<ConstantIntOp>(op, 0, subi.getType());
     return matchSuccess();
   }
 };


@@ -56,7 +56,7 @@ void ConstantFold::foldInstruction(Instruction *op) {
     Attribute operandCst = nullptr;
     if (auto *operandOp = operand->getDefiningInst()) {
       if (auto operandConstantOp = operandOp->dyn_cast<ConstantOp>())
-        operandCst = operandConstantOp->getValue();
+        operandCst = operandConstantOp.getValue();
     }
     operandConstants.push_back(operandCst);
   }


@@ -168,12 +168,12 @@ static bool getFullMemRefAsRegion(Instruction *opInst, unsigned numParamLoopIVs,
                                   MemRefRegion *region) {
   unsigned rank;
   if (auto loadOp = opInst->dyn_cast<LoadOp>()) {
-    rank = loadOp->getMemRefType().getRank();
-    region->memref = loadOp->getMemRef();
+    rank = loadOp.getMemRefType().getRank();
+    region->memref = loadOp.getMemRef();
     region->setWrite(false);
   } else if (auto storeOp = opInst->dyn_cast<StoreOp>()) {
-    rank = storeOp->getMemRefType().getRank();
-    region->memref = storeOp->getMemRef();
+    rank = storeOp.getMemRefType().getRank();
+    region->memref = storeOp.getMemRef();
     region->setWrite(true);
   } else {
     assert(false && "expected load or store op");
@@ -317,7 +317,7 @@ bool DmaGeneration::generateDma(const MemRefRegion &region, Block *block,
         memIndices.push_back(zeroIndex);
       } else {
         memIndices.push_back(
-            top.create<ConstantIndexOp>(loc, indexVal)->getResult());
+            top.create<ConstantIndexOp>(loc, indexVal).getResult());
       }
     } else {
       // The coordinate for the start location is just the lower bound along the
@@ -345,7 +345,7 @@ bool DmaGeneration::generateDma(const MemRefRegion &region, Block *block,
     // Create the fast memory space buffer just before the 'affine.for'
     // instruction.
-    fastMemRef = prologue.create<AllocOp>(loc, fastMemRefType)->getResult();
+    fastMemRef = prologue.create<AllocOp>(loc, fastMemRefType).getResult();
     // Record it.
     fastBufferMap[memref] = fastMemRef;
     // fastMemRefType is a constant shaped memref.
@@ -608,10 +608,10 @@ uint64_t DmaGeneration::runOnBlock(Block::iterator begin, Block::iterator end) {
   block->walk(begin, end, [&](Instruction *opInst) {
     // Gather regions to allocate to buffers in faster memory space.
     if (auto loadOp = opInst->dyn_cast<LoadOp>()) {
-      if (loadOp->getMemRefType().getMemorySpace() != slowMemorySpace)
+      if (loadOp.getMemRefType().getMemorySpace() != slowMemorySpace)
        return;
    } else if (auto storeOp = opInst->dyn_cast<StoreOp>()) {
-      if (storeOp->getMemRefType().getMemorySpace() != slowMemorySpace)
+      if (storeOp.getMemRefType().getMemorySpace() != slowMemorySpace)
        return;
    } else {
      // Neither load nor a store op.


@@ -174,7 +174,7 @@ public:
   unsigned getLoadOpCount(Value *memref) {
     unsigned loadOpCount = 0;
     for (auto *loadOpInst : loads) {
-      if (memref == loadOpInst->cast<LoadOp>()->getMemRef())
+      if (memref == loadOpInst->cast<LoadOp>().getMemRef())
         ++loadOpCount;
     }
     return loadOpCount;
@@ -184,7 +184,7 @@ public:
   unsigned getStoreOpCount(Value *memref) {
     unsigned storeOpCount = 0;
     for (auto *storeOpInst : stores) {
-      if (memref == storeOpInst->cast<StoreOp>()->getMemRef())
+      if (memref == storeOpInst->cast<StoreOp>().getMemRef())
         ++storeOpCount;
     }
     return storeOpCount;
@@ -194,7 +194,7 @@ public:
   void getStoreOpsForMemref(Value *memref,
                             SmallVectorImpl<Instruction *> *storeOps) {
     for (auto *storeOpInst : stores) {
-      if (memref == storeOpInst->cast<StoreOp>()->getMemRef())
+      if (memref == storeOpInst->cast<StoreOp>().getMemRef())
         storeOps->push_back(storeOpInst);
     }
   }
@@ -203,7 +203,7 @@ public:
   void getLoadOpsForMemref(Value *memref,
                            SmallVectorImpl<Instruction *> *loadOps) {
     for (auto *loadOpInst : loads) {
-      if (memref == loadOpInst->cast<LoadOp>()->getMemRef())
+      if (memref == loadOpInst->cast<LoadOp>().getMemRef())
         loadOps->push_back(loadOpInst);
     }
   }
@@ -213,10 +213,10 @@ public:
   void getLoadAndStoreMemrefSet(DenseSet<Value *> *loadAndStoreMemrefSet) {
     llvm::SmallDenseSet<Value *, 2> loadMemrefs;
     for (auto *loadOpInst : loads) {
-      loadMemrefs.insert(loadOpInst->cast<LoadOp>()->getMemRef());
+      loadMemrefs.insert(loadOpInst->cast<LoadOp>().getMemRef());
     }
     for (auto *storeOpInst : stores) {
-      auto *memref = storeOpInst->cast<StoreOp>()->getMemRef();
+      auto *memref = storeOpInst->cast<StoreOp>().getMemRef();
       if (loadMemrefs.count(memref) > 0)
         loadAndStoreMemrefSet->insert(memref);
     }
@@ -300,7 +300,7 @@ public:
   bool writesToLiveInOrEscapingMemrefs(unsigned id) {
     Node *node = getNode(id);
     for (auto *storeOpInst : node->stores) {
-      auto *memref = storeOpInst->cast<StoreOp>()->getMemRef();
+      auto *memref = storeOpInst->cast<StoreOp>().getMemRef();
       auto *inst = memref->getDefiningInst();
       // Return true if 'memref' is a block argument.
       if (!inst)
@@ -325,7 +325,7 @@ public:
     Node *node = getNode(id);
     for (auto *storeOpInst : node->stores) {
       // Return false if there exist out edges from 'id' on 'memref'.
-      if (getOutEdgeCount(id, storeOpInst->cast<StoreOp>()->getMemRef()) > 0)
+      if (getOutEdgeCount(id, storeOpInst->cast<StoreOp>().getMemRef()) > 0)
         return false;
     }
     return true;
@@ -648,12 +648,12 @@ bool MemRefDependenceGraph::init(Function *f) {
       Node node(nextNodeId++, &inst);
       for (auto *opInst : collector.loadOpInsts) {
         node.loads.push_back(opInst);
-        auto *memref = opInst->cast<LoadOp>()->getMemRef();
+        auto *memref = opInst->cast<LoadOp>().getMemRef();
         memrefAccesses[memref].insert(node.id);
       }
       for (auto *opInst : collector.storeOpInsts) {
         node.stores.push_back(opInst);
-        auto *memref = opInst->cast<StoreOp>()->getMemRef();
+        auto *memref = opInst->cast<StoreOp>().getMemRef();
         memrefAccesses[memref].insert(node.id);
       }
       forToNodeMap[&inst] = node.id;
@@ -662,14 +662,14 @@ bool MemRefDependenceGraph::init(Function *f) {
       // Create graph node for top-level load op.
       Node node(nextNodeId++, &inst);
       node.loads.push_back(&inst);
-      auto *memref = inst.cast<LoadOp>()->getMemRef();
+      auto *memref = inst.cast<LoadOp>().getMemRef();
       memrefAccesses[memref].insert(node.id);
       nodes.insert({node.id, node});
     } else if (auto storeOp = inst.dyn_cast<StoreOp>()) {
       // Create graph node for top-level store op.
       Node node(nextNodeId++, &inst);
       node.stores.push_back(&inst);
-      auto *memref = inst.cast<StoreOp>()->getMemRef();
+      auto *memref = inst.cast<StoreOp>().getMemRef();
       memrefAccesses[memref].insert(node.id);
       nodes.insert({node.id, node});
     } else if (inst.getNumRegions() != 0) {
@@ -880,7 +880,7 @@ moveLoadsAccessingMemrefTo(Value *memref,
   dstLoads->clear();
   SmallVector<Instruction *, 4> srcLoadsToKeep;
   for (auto *load : *srcLoads) {
-    if (load->cast<LoadOp>()->getMemRef() == memref)
+    if (load->cast<LoadOp>().getMemRef() == memref)
       dstLoads->push_back(load);
     else
       srcLoadsToKeep.push_back(load);
@@ -1126,7 +1126,7 @@ static Value *createPrivateMemRef(AffineForOp forOp,
   // Builder to create constants at the top level.
   FuncBuilder top(forInst->getFunction());
   // Create new memref type based on slice bounds.
-  auto *oldMemRef = srcStoreOpInst->cast<StoreOp>()->getMemRef();
+  auto *oldMemRef = srcStoreOpInst->cast<StoreOp>().getMemRef();
   auto oldMemRefType = oldMemRef->getType().cast<MemRefType>();
   unsigned rank = oldMemRefType.getRank();
@@ -1857,7 +1857,7 @@ public:
     DenseSet<Value *> visitedMemrefs;
     while (!loads.empty()) {
       // Get memref of load on top of the stack.
-      auto *memref = loads.back()->cast<LoadOp>()->getMemRef();
+      auto *memref = loads.back()->cast<LoadOp>().getMemRef();
       if (visitedMemrefs.count(memref) > 0)
         continue;
       visitedMemrefs.insert(memref);
@@ -1920,7 +1920,7 @@ public:
       // Gather 'dstNode' store ops to 'memref'.
       SmallVector<Instruction *, 2> dstStoreOpInsts;
       for (auto *storeOpInst : dstNode->stores)
-        if (storeOpInst->cast<StoreOp>()->getMemRef() == memref)
+        if (storeOpInst->cast<StoreOp>().getMemRef() == memref)
          dstStoreOpInsts.push_back(storeOpInst);
      unsigned bestDstLoopDepth;
@@ -1956,7 +1956,7 @@ public:
        // Create private memref for 'memref' in 'dstAffineForOp'.
        SmallVector<Instruction *, 4> storesForMemref;
        for (auto *storeOpInst : sliceCollector.storeOpInsts) {
-          if (storeOpInst->cast<StoreOp>()->getMemRef() == memref)
+          if (storeOpInst->cast<StoreOp>().getMemRef() == memref)
            storesForMemref.push_back(storeOpInst);
        }
        assert(storesForMemref.size() == 1);
@@ -1978,7 +1978,7 @@ public:
        // Add new load ops to current Node load op list 'loads' to
        // continue fusing based on new operands.
        for (auto *loadOpInst : dstLoopCollector.loadOpInsts) {
-          auto *loadMemRef = loadOpInst->cast<LoadOp>()->getMemRef();
+          auto *loadMemRef = loadOpInst->cast<LoadOp>().getMemRef();
          if (visitedMemrefs.count(loadMemRef) == 0)
            loads.push_back(loadOpInst);
        }
@@ -2163,7 +2163,7 @@ public:
      // Check that all stores are to the same memref.
      DenseSet<Value *> storeMemrefs;
      for (auto *storeOpInst : sibNode->stores) {
-        storeMemrefs.insert(storeOpInst->cast<StoreOp>()->getMemRef());
+        storeMemrefs.insert(storeOpInst->cast<StoreOp>().getMemRef());
      }
      if (storeMemrefs.size() != 1)
        return;


@@ -108,14 +108,14 @@ public:
   /// Used for staging the transfer in a local scalar buffer.
   MemRefType tmpMemRefType() {
-    auto vectorType = transfer->getVectorType();
+    auto vectorType = transfer.getVectorType();
     return MemRefType::get(vectorType.getShape(), vectorType.getElementType(),
                            {}, 0);
   }
   /// View of tmpMemRefType as one vector, used in vector load/store to tmp
   /// buffer.
   MemRefType vectorMemRefType() {
-    return MemRefType::get({1}, transfer->getVectorType(), {}, 0);
+    return MemRefType::get({1}, transfer.getVectorType(), {}, 0);
   }
   /// Performs the rewrite.
   void rewrite();
@@ -137,12 +137,12 @@ void coalesceCopy(VectorTransferOpTy transfer,
                   edsc::VectorView *vectorView) {
   // rank of the remote memory access, coalescing behavior occurs on the
   // innermost memory dimension.
-  auto remoteRank = transfer->getMemRefType().getRank();
+  auto remoteRank = transfer.getMemRefType().getRank();
   // Iterate over the results expressions of the permutation map to determine
   // the loop order for creating pointwise copies between remote and local
   // memories.
   int coalescedIdx = -1;
-  auto exprs = transfer->getPermutationMap().getResults();
+  auto exprs = transfer.getPermutationMap().getResults();
   for (auto en : llvm::enumerate(exprs)) {
     auto dim = en.value().template dyn_cast<AffineDimExpr>();
     if (!dim) {
@@ -173,7 +173,7 @@ clip(VectorTransferOpTy transfer, edsc::MemRefView &view,
   using edsc::intrinsics::select;
   IndexHandle zero(index_t(0)), one(index_t(1));
-  llvm::SmallVector<edsc::ValueHandle, 8> memRefAccess(transfer->getIndices());
+  llvm::SmallVector<edsc::ValueHandle, 8> memRefAccess(transfer.getIndices());
   llvm::SmallVector<edsc::ValueHandle, 8> clippedScalarAccessExprs(
       memRefAccess.size(), edsc::IndexHandle());
@@ -183,7 +183,7 @@ clip(VectorTransferOpTy transfer, edsc::MemRefView &view,
        ++memRefDim) {
     // Linear search on a small number of entries.
     int loopIndex = -1;
-    auto exprs = transfer->getPermutationMap().getResults();
+    auto exprs = transfer.getPermutationMap().getResults();
     for (auto en : llvm::enumerate(exprs)) {
       auto expr = en.value();
       auto dim = expr.template dyn_cast<AffineDimExpr>();
@@ -267,11 +267,11 @@ template <> void VectorTransferRewriter<VectorTransferReadOp>::rewrite() {
   using namespace mlir::edsc::intrinsics;
   // 1. Setup all the captures.
-  ScopedContext scope(FuncBuilder(transfer->getInstruction()),
-                      transfer->getLoc());
-  IndexedValue remote(transfer->getMemRef());
-  MemRefView view(transfer->getMemRef());
-  VectorView vectorView(transfer->getVector());
+  ScopedContext scope(FuncBuilder(transfer.getInstruction()),
+                      transfer.getLoc());
+  IndexedValue remote(transfer.getMemRef());
+  MemRefView view(transfer.getMemRef());
+  VectorView vectorView(transfer.getVector());
   SmallVector<IndexHandle, 8> ivs =
       IndexHandle::makeIndexHandles(vectorView.rank());
   SmallVector<ValueHandle *, 8> pivs =
@@ -294,8 +294,8 @@ template <> void VectorTransferRewriter<VectorTransferReadOp>::rewrite() {
   (dealloc(tmp)); // vexing parse
   // 3. Propagate.
-  transfer->replaceAllUsesWith(vectorValue.getValue());
-  transfer->erase();
+  transfer.replaceAllUsesWith(vectorValue.getValue());
+  transfer.erase();
 }
 /// Lowers VectorTransferWriteOp into a combination of:
@@ -322,12 +322,12 @@ template <> void VectorTransferRewriter<VectorTransferWriteOp>::rewrite() {
   using namespace mlir::edsc::intrinsics;
   // 1. Setup all the captures.
-  ScopedContext scope(FuncBuilder(transfer->getInstruction()),
-                      transfer->getLoc());
-  IndexedValue remote(transfer->getMemRef());
-  MemRefView view(transfer->getMemRef());
-  ValueHandle vectorValue(transfer->getVector());
-  VectorView vectorView(transfer->getVector());
+  ScopedContext scope(FuncBuilder(transfer.getInstruction()),
+                      transfer.getLoc());
+  IndexedValue remote(transfer.getMemRef());
+  MemRefView view(transfer.getMemRef());
+  ValueHandle vectorValue(transfer.getVector());
+  VectorView vectorView(transfer.getVector());
   SmallVector<IndexHandle, 8> ivs =
       IndexHandle::makeIndexHandles(vectorView.rank());
   SmallVector<ValueHandle *, 8> pivs =
@@ -349,7 +349,7 @@ template <> void VectorTransferRewriter<VectorTransferWriteOp>::rewrite() {
   });
   (dealloc(tmp)); // vexing parse...
-  transfer->erase();
+  transfer.erase();
 }
 namespace {


@@ -447,7 +447,7 @@ static AffineMap projectedPermutationMap(VectorTransferOpTy transfer,
       std::is_same<VectorTransferOpTy, VectorTransferReadOp>::value ||
           std::is_same<VectorTransferOpTy, VectorTransferWriteOp>::value,
       "Must be called on a VectorTransferOp");
-  auto superVectorType = transfer->getVectorType();
+  auto superVectorType = transfer.getVectorType();
   auto optionalRatio = shapeRatio(superVectorType, hwVectorType);
   assert(optionalRatio &&
          (optionalRatio->size() == superVectorType.getShape().size()) &&
@@ -465,7 +465,7 @@ static AffineMap projectedPermutationMap(VectorTransferOpTy transfer,
         ++dim;
       },
       superVectorType.getShape(), *optionalRatio);
-  auto permutationMap = transfer->getPermutationMap();
+  auto permutationMap = transfer.getPermutationMap();
   LLVM_DEBUG(permutationMap.print(dbgs() << "\npermutationMap: "));
   if (keep.empty()) {
     return permutationMap;
@@ -486,17 +486,17 @@ static Instruction *instantiate(FuncBuilder *b, VectorTransferReadOp read,
                                 ArrayRef<unsigned> hwVectorInstance,
                                 DenseMap<Value *, Value *> *substitutionsMap) {
   SmallVector<Value *, 8> indices =
-      map(makePtrDynCaster<Value>(), read->getIndices());
+      map(makePtrDynCaster<Value>(), read.getIndices());
   auto affineIndices =
       reindexAffineIndices(b, hwVectorType, hwVectorInstance, indices);
   auto map = projectedPermutationMap(read, hwVectorType);
   if (!map) {
     return nullptr;
   }
-  auto cloned = b->create<VectorTransferReadOp>(
-      read->getLoc(), hwVectorType, read->getMemRef(), affineIndices, map,
-      read->getPaddingValue());
-  return cloned->getInstruction();
+  auto cloned = b->create<VectorTransferReadOp>(read.getLoc(), hwVectorType,
+                                                read.getMemRef(), affineIndices,
+                                                map, read.getPaddingValue());
+  return cloned.getInstruction();
 }
 /// Creates an instantiated version of `write` for the instance of
@@ -510,15 +510,15 @@ static Instruction *instantiate(FuncBuilder *b, VectorTransferWriteOp write,
                                 ArrayRef<unsigned> hwVectorInstance,
                                 DenseMap<Value *, Value *> *substitutionsMap) {
   SmallVector<Value *, 8> indices =
-      map(makePtrDynCaster<Value>(), write->getIndices());
+      map(makePtrDynCaster<Value>(), write.getIndices());
   auto affineIndices =
       reindexAffineIndices(b, hwVectorType, hwVectorInstance, indices);
   auto cloned = b->create<VectorTransferWriteOp>(
-      write->getLoc(),
-      substitute(write->getVector(), hwVectorType, substitutionsMap),
-      write->getMemRef(), affineIndices,
+      write.getLoc(),
+      substitute(write.getVector(), hwVectorType, substitutionsMap),
+      write.getMemRef(), affineIndices,
       projectedPermutationMap(write, hwVectorType));
-  return cloned->getInstruction();
+  return cloned.getInstruction();
 }
 /// Returns `true` if inst instance is properly cloned and inserted, false
@@ -568,7 +568,7 @@ static bool instantiateMaterialization(Instruction *inst,
     return true;
   }
   state->substitutionsMap->insert(
-      std::make_pair(read->getResult(), clone->getResult(0)));
+      std::make_pair(read.getResult(), clone->getResult(0)));
   return false;
 }
 // The only op with 0 results reaching this point must, by construction, be
@@ -712,7 +712,7 @@ static bool materialize(Function *f,
     // Emit the current slice.
     // Set scoped super-vector and corresponding hw vector types.
-    state->superVectorType = terminator->getVectorType();
+    state->superVectorType = terminator.getVectorType();
     assert((state->superVectorType.getElementType() ==
             FloatType::getF32(term->getContext())) &&
            "Only f32 supported for now");


@@ -95,18 +95,18 @@ FunctionPassBase *mlir::createMemRefDataFlowOptPass() {
 // this in the future if needed.
 void MemRefDataFlowOpt::forwardStoreToLoad(LoadOp loadOp) {
   Instruction *lastWriteStoreOp = nullptr;
-  Instruction *loadOpInst = loadOp->getInstruction();
+  Instruction *loadOpInst = loadOp.getInstruction();
   // First pass over the use list to get minimum number of surrounding
   // loops common between the load op and the store op, with min taken across
   // all store ops.
   SmallVector<Instruction *, 8> storeOps;
   unsigned minSurroundingLoops = getNestingDepth(*loadOpInst);
-  for (InstOperand &use : loadOp->getMemRef()->getUses()) {
+  for (InstOperand &use : loadOp.getMemRef()->getUses()) {
     auto storeOp = use.getOwner()->dyn_cast<StoreOp>();
     if (!storeOp)
       continue;
-    auto *storeOpInst = storeOp->getInstruction();
+    auto *storeOpInst = storeOp.getInstruction();
     unsigned nsLoops = getNumCommonSurroundingLoops(*loadOpInst, *storeOpInst);
     minSurroundingLoops = std::min(nsLoops, minSurroundingLoops);
     storeOps.push_back(storeOpInst);
@@ -169,7 +169,7 @@ void MemRefDataFlowOpt::forwardStoreToLoad(LoadOp loadOp) {
       MemRefRegion region(loadOpInst->getLoc());
       region.compute(loadOpInst, nsLoops);
       if (!region.getConstraints()->isRangeOneToOne(
-              /*start=*/0, /*limit=*/loadOp->getMemRefType().getRank()))
+              /*start=*/0, /*limit=*/loadOp.getMemRefType().getRank()))
        break;
    }
@@ -201,10 +201,10 @@ void MemRefDataFlowOpt::forwardStoreToLoad(LoadOp loadOp) {
     return;
   // Perform the actual store to load forwarding.
-  Value *storeVal = lastWriteStoreOp->cast<StoreOp>()->getValueToStore();
-  loadOp->getResult()->replaceAllUsesWith(storeVal);
+  Value *storeVal = lastWriteStoreOp->cast<StoreOp>().getValueToStore();
+  loadOp.getResult()->replaceAllUsesWith(storeVal);
   // Record the memref for a later sweep to optimize away.
-  memrefsToErase.insert(loadOp->getMemRef());
+  memrefsToErase.insert(loadOp.getMemRef());
   // Record this to erase later.
   loadOpsToErase.push_back(loadOpInst);
 }


@@ -125,7 +125,7 @@ static bool doubleBuffer(Value *oldMemRef, AffineForOp forOp) {
           /*domInstFilter=*/&*forOp.getBody()->begin())) {
     LLVM_DEBUG(
         forOp.emitError("memref replacement for double buffering failed"));
-    ivModTwoOp->getInstruction()->erase();
+    ivModTwoOp.erase();
     return false;
   }
   // Insert the dealloc op right after the for loop.
@@ -152,10 +152,10 @@ void PipelineDataTransfer::runOnFunction() {
 // Check if tags of the dma start op and dma wait op match.
 static bool checkTagMatch(DmaStartOp startOp, DmaWaitOp waitOp) {
-  if (startOp->getTagMemRef() != waitOp->getTagMemRef())
+  if (startOp.getTagMemRef() != waitOp.getTagMemRef())
     return false;
-  auto startIndices = startOp->getTagIndices();
-  auto waitIndices = waitOp->getTagIndices();
+  auto startIndices = startOp.getTagIndices();
+  auto waitIndices = waitOp.getTagIndices();
   // Both of these have the same number of indices since they correspond to the
   // same tag memref.
   for (auto it = startIndices.begin(), wIt = waitIndices.begin(),
@@ -182,7 +182,7 @@ static void findMatchingStartFinishInsts(
   SmallVector<DmaStartOp, 4> outgoingDmaOps;
   for (auto &inst : *forOp.getBody()) {
     auto dmaStartOp = inst.dyn_cast<DmaStartOp>();
-    if (dmaStartOp && dmaStartOp->isSrcMemorySpaceFaster())
+    if (dmaStartOp && dmaStartOp.isSrcMemorySpaceFaster())
       outgoingDmaOps.push_back(dmaStartOp);
   }
@@ -199,7 +199,7 @@ static void findMatchingStartFinishInsts(
     // Only DMAs incoming into higher memory spaces are pipelined for now.
     // TODO(bondhugula): handle outgoing DMA pipelining.
-    if (!dmaStartOp->isDestMemorySpaceFaster())
+    if (!dmaStartOp.isDestMemorySpaceFaster())
       continue;
     // Check for dependence with outgoing DMAs. Doing this conservatively.
@@ -207,14 +207,14 @@ static void findMatchingStartFinishInsts(
     // dependences between an incoming and outgoing DMA in the same iteration.
     auto it = outgoingDmaOps.begin();
     for (; it != outgoingDmaOps.end(); ++it) {
-      if ((*it)->getDstMemRef() == dmaStartOp->getSrcMemRef())
+      if (it->getDstMemRef() == dmaStartOp.getSrcMemRef())
        break;
    }
    if (it != outgoingDmaOps.end())
      continue;
    // We only double buffer if the buffer is not live out of loop.
-    auto *memref = dmaStartOp->getOperand(dmaStartOp->getFasterMemPos());
+    auto *memref = dmaStartOp.getOperand(dmaStartOp.getFasterMemPos());
    bool escapingUses = false;
    for (const auto &use : memref->getUses()) {
      // We can double buffer regardless of dealloc's outside the loop.
@@ -272,7 +272,7 @@ void PipelineDataTransfer::runOnAffineForOp(AffineForOp forOp) {
   for (auto &pair : startWaitPairs) {
     auto *dmaStartInst = pair.first;
     Value *oldMemRef = dmaStartInst->getOperand(
-        dmaStartInst->cast<DmaStartOp>()->getFasterMemPos());
+        dmaStartInst->cast<DmaStartOp>().getFasterMemPos());
     if (!doubleBuffer(oldMemRef, forOp)) {
       // Normally, double buffering should not fail because we already checked
       // that there are no uses outside.


@@ -158,18 +158,18 @@ void GreedyPatternRewriteDriver::simplifyFunction() {
       if (auto constant = op->dyn_cast<ConstantOp>()) {
         // If this constant is dead, remove it, being careful to keep
         // uniquedConstants up to date.
-        if (constant->use_empty()) {
+        if (constant.use_empty()) {
           auto it =
-              uniquedConstants.find({constant->getValue(), constant->getType()});
+              uniquedConstants.find({constant.getValue(), constant.getType()});
           if (it != uniquedConstants.end() && it->second == op)
             uniquedConstants.erase(it);
-          constant->erase();
+          constant.erase();
           continue;
         }
         // Check to see if we already have a constant with this type and value:
-        auto &entry = uniquedConstants[std::make_pair(constant->getValue(),
-                                                      constant->getType())];
+        auto &entry = uniquedConstants[std::make_pair(constant.getValue(),
+                                                      constant.getType())];
         if (entry) {
           // If this constant is already our uniqued one, then leave it alone.
           if (entry == op)
@@ -178,8 +178,8 @@ void GreedyPatternRewriteDriver::simplifyFunction() {
           // Otherwise replace this redundant constant with the uniqued one. We
           // know this is safe because we move constants to the top of the
           // function when they are uniqued, so we know they dominate all uses.
-          constant->replaceAllUsesWith(entry->getResult(0));
-          constant->erase();
+          constant.replaceAllUsesWith(entry->getResult(0));
+          constant.erase();
           continue;
         }


@@ -819,8 +819,7 @@ template <typename LoadOrStoreOpPointer>
 static LogicalResult vectorizeRootOrTerminal(Value *iv,
                                              LoadOrStoreOpPointer memoryOp,
                                              VectorizationState *state) {
-  auto memRefType =
-      memoryOp->getMemRef()->getType().template cast<MemRefType>();
+  auto memRefType = memoryOp.getMemRef()->getType().template cast<MemRefType>();
   auto elementType = memRefType.getElementType();
   // TODO(ntv): ponder whether we want to further vectorize a vector value.
@@ -829,7 +828,7 @@ static LogicalResult vectorizeRootOrTerminal(Value *iv,
   auto vectorType = VectorType::get(state->strategy->vectorSizes, elementType);
   // Materialize a MemRef with 1 vector.
-  auto *opInst = memoryOp->getInstruction();
+  auto *opInst = memoryOp.getInstruction();
   // For now, vector_transfers must be aligned, operate only on indices with an
   // identity subset of AffineMap and do not change layout.
   // TODO(ntv): increase the expressiveness power of vector_transfer operations
@@ -841,9 +840,9 @@ static LogicalResult vectorizeRootOrTerminal(Value *iv,
     LLVM_DEBUG(permutationMap.print(dbgs()));
     FuncBuilder b(opInst);
     auto transfer = b.create<VectorTransferReadOp>(
-        opInst->getLoc(), vectorType, memoryOp->getMemRef(),
-        map(makePtrDynCaster<Value>(), memoryOp->getIndices()), permutationMap);
-    state->registerReplacement(opInst, transfer->getInstruction());
+        opInst->getLoc(), vectorType, memoryOp.getMemRef(),
+        map(makePtrDynCaster<Value>(), memoryOp.getIndices()), permutationMap);
+    state->registerReplacement(opInst, transfer.getInstruction());
   } else {
     state->registerTerminal(opInst);
   }
@@ -1041,10 +1040,10 @@ static Instruction *vectorizeOneInstruction(Instruction *opInst,
          "vector_transfer_write cannot be further vectorized");
   if (auto store = opInst->dyn_cast<StoreOp>()) {
-    auto *memRef = store->getMemRef();
-    auto *value = store->getValueToStore();
+    auto *memRef = store.getMemRef();
+    auto *value = store.getValueToStore();
     auto *vectorValue = vectorizeOperand(value, opInst, state);
-    auto indices = map(makePtrDynCaster<Value>(), store->getIndices());
+    auto indices = map(makePtrDynCaster<Value>(), store.getIndices());
     FuncBuilder b(opInst);
     auto permutationMap =
         makePermutationMap(opInst, state->strategy->loopToVectorDim);
@@ -1052,7 +1051,7 @@ static Instruction *vectorizeOneInstruction(Instruction *opInst,
     LLVM_DEBUG(permutationMap.print(dbgs()));
     auto transfer = b.create<VectorTransferWriteOp>(
         opInst->getLoc(), vectorValue, memRef, indices, permutationMap);
-    auto *res = transfer->getInstruction();
+    auto *res = transfer.getInstruction();
     LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorized store: " << *res);
     // "Terminals" (i.e. StoreOps) are erased on the spot.
     opInst->erase();


@@ -130,15 +130,15 @@ static bool emitOneBuilder(const Record &record, raw_ostream &os) {
       bool isVariadicArg = isVariadicArgumentName(op, name);
       if (isOperandName(op, name)) {
         auto result = isVariadicArg
-                          ? formatv("lookupValues(op->{0}())", name)
-                          : formatv("valueMapping.lookup(op->{0}())", name);
+                          ? formatv("lookupValues(op.{0}())", name)
+                          : formatv("valueMapping.lookup(op.{0}())", name);
         bs << result;
       } else if (isAttributeName(op, name)) {
-        bs << formatv("op->{0}()", name);
+        bs << formatv("op.{0}()", name);
       } else if (isResultName(op, name)) {
-        bs << formatv("valueMapping[op->{0}()]", name);
+        bs << formatv("valueMapping[op.{0}()]", name);
       } else if (name == "_resultType") {
-        bs << "op->getResult()->getType().cast<LLVM::LLVMType>()."
+        bs << "op.getResult()->getType().cast<LLVM::LLVMType>()."
              "getUnderlyingType()";
       } else if (name == "_hasResult") {
         bs << "inst.getNumResults() == 1";