mirror of https://github.com/llvm/circt.git
[Handshake] Use free variants of isa/cast/dyn_cast
Refer to https://mlir.llvm.org/deprecation/
commit cfaf3d79ae
parent 8867f7b867
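The change is mechanical throughout: member-style `x.isa<T>()`, `x.cast<T>()`, `x.dyn_cast<T>()`, and `x.dyn_cast_or_null<T>()` calls on MLIR types, attributes, and values are rewritten to the equivalent free functions `isa<T>(x)`, `cast<T>(x)`, `dyn_cast<T>(x)`, and `dyn_cast_or_null<T>(x)`, following the MLIR deprecation guidance linked above. A minimal sketch of the before/after pattern, using a hypothetical helper (`dataWidth` is an illustrative name, not code from this commit):

// Illustrative sketch only; `dataWidth` is a made-up helper, not CIRCT code.
#include "mlir/IR/BuiltinTypes.h" // IntegerType, NoneType
#include "mlir/IR/Value.h"

using namespace mlir;

static unsigned dataWidth(Value v) {
  Type t = v.getType();
  // Deprecated member-function style:
  //   if (t.isa<NoneType>()) return 0;
  //   if (auto intTy = t.dyn_cast<IntegerType>()) return intTy.getWidth();
  // Free-function style used throughout this commit:
  if (isa<NoneType>(t))
    return 0;
  if (auto intTy = dyn_cast<IntegerType>(t))
    return intTy.getWidth();
  return 0; // non-integer, non-none types carry no bit width here
}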
@@ -71,7 +71,7 @@ public:
   template <typename TSrcTerm, typename TDstTerm>
   LogicalResult setControlOnlyPath(ConversionPatternRewriter &rewriter,
                                    Value entryCtrl) {
-    assert(entryCtrl.getType().isa<NoneType>() &&
+    assert(isa<NoneType>(entryCtrl.getType()) &&
            "Expected NoneType for entry control value");
     // Creates start and end points of the control-only path
     Block *entryBlock = &r.front();

@@ -67,7 +67,7 @@ def BufferOp : DCOp<"buffer",
   let extraClassDeclaration = [{
     // Returns the data type of this buffer, if any.
     std::optional<TypeRange> getInnerType() {
-      if(auto type = getInput().getType().dyn_cast<ValueType>())
+      if(auto type = llvm::dyn_cast<ValueType>(getInput().getType()))
        return type.getInnerType();
      return std::nullopt;
    }

@@ -41,7 +41,7 @@ def I1ValueType : Type<
 }

 def ValueOrTokenType : Type<
-  CPred<"$_self.isa<dc::ValueType, dc::TokenType>()">,
+  CPred<"llvm::isa<dc::ValueType, dc::TokenType>($_self)">,
   "must be a !dc.value or !dc.token type"> {
 }

@@ -69,7 +69,7 @@ def SOSTInterface : OpInterface<"SOSTInterface"> {
       auto concreteOp = mlir::cast<ConcreteOp>($_op);
       // The operation is a control operation if its single data type is a
       // NoneType.
-      return concreteOp.getDataType().template isa<mlir::NoneType>();
+      return llvm::isa<mlir::NoneType>(concreteOp.getDataType());
     }]
   >,
   InterfaceMethod<[{

@@ -94,12 +94,12 @@ def FuncOp : Op<Handshake_Dialect, "func", [

     /// Returns the argument name at the given index.
     StringAttr getArgName(unsigned idx) {
-      return getArgNames()[idx].cast<StringAttr>();
+      return llvm::cast<StringAttr>(getArgNames()[idx]);
     }

     /// Returns the result name at the given index.
     StringAttr getResName(unsigned idx) {
-      return getResNames()[idx].cast<StringAttr>();
+      return llvm::cast<StringAttr>(getResNames()[idx]);
     }

     /// Resolve argument and result names. This can be used during building of

@@ -113,7 +113,7 @@ def FuncOp : Op<Handshake_Dialect, "func", [
     /// safely.
     LogicalResult verifyType() {
       auto type = getFunctionTypeAttr().getValue();
-      if (!type.isa<FunctionType>())
+      if (!llvm::isa<FunctionType>(type))
         return emitOpError(
             "requires '" + getFunctionTypeAttrName().getValue() +
             "' attribute of function type");

@@ -296,7 +296,8 @@ def BufferOp : Handshake_Op<"buffer", [Pure, HasClock, SameOperandsAndResultType
       assert(getInitValues() && "initValues attribute not set");
       SmallVector<int64_t> values;
       for (auto value : (*this)->getAttrOfType<ArrayAttr>("initValues"))
-        values.push_back(value.cast<IntegerAttr>().getValue().getSExtValue());
+        values.push_back(llvm::cast<IntegerAttr>(value)
+                             .getValue().getSExtValue());
       return values;
     }

@@ -665,7 +666,7 @@ def ExternalMemoryOp : Handshake_Op<"extmemory", [

   let extraClassDeclaration = [{
     mlir::MemRefType getMemRefType() {
-      return getMemref().getType().cast<mlir::MemRefType>();
+      return llvm::cast<mlir::MemRefType>(getMemref().getType());
     }

     llvm::SmallVector<handshake::MemLoadInterface> getLoadPorts();

@@ -793,7 +794,7 @@ def UnpackOp : Handshake_Op<"unpack", [
     DeclareOpInterfaceMethods<GeneralOpInterface>,
     TypesMatchWith<"result types match element types of 'tuple'",
                    "input", "results",
-                   "$_self.cast<TupleType>().getTypes()">
+                   "llvm::cast<TupleType>($_self).getTypes()">
   ]> {
   let summary = "unpacks a tuple";
   let description = [{

@@ -815,7 +816,7 @@ def UnpackOp : Handshake_Op<"unpack", [
   let builders = [OpBuilder<(ins "Value":$input), [{
     $_state.addOperands(input);

-    TupleType type = input.getType().dyn_cast_or_null<TupleType>();
+    TupleType type = llvm::dyn_cast_or_null<TupleType>(input.getType());
     assert(type && "expect unpack to have a 'TupleType' operand");
     $_state.addTypes(type.getTypes());
   }]>];

@@ -828,7 +829,7 @@ def PackOp : Handshake_Op<"pack", [
     DeclareOpInterfaceMethods<GeneralOpInterface>,
     TypesMatchWith<"input types match element types of 'tuple'",
                    "result", "inputs",
-                   "$_self.cast<TupleType>().getTypes()">
+                   "llvm::cast<TupleType>($_self).getTypes()">
   ]> {

   let summary = "packs a tuple";

@@ -361,7 +361,7 @@ HandshakeLowering::insertMergeOps(HandshakeLowering::ValueMap &mergePairs,
     // thanks to prior SSA maximization
     for (auto &arg : block.getArguments()) {
       // No merges on memref block arguments; these are handled separately
-      if (arg.getType().isa<mlir::MemRefType>())
+      if (isa<mlir::MemRefType>(arg.getType()))
         continue;

       auto mergeInfo = insertMerge(&block, arg, edgeBuilder, rewriter);

@@ -383,7 +383,7 @@ static Value getMergeOperand(HandshakeLowering::MergeOpInfo mergeInfo,
   // The block terminator is either a cf-level branch or cf-level conditional
   // branch. In either case, identify the value passed to the block using its
   // index in the list of block arguments
-  unsigned index = srcVal.cast<BlockArgument>().getArgNumber();
+  unsigned index = cast<BlockArgument>(srcVal).getArgNumber();
   Operation *termOp = predBlock->getTerminator();
   if (mlir::cf::CondBranchOp br = dyn_cast<mlir::cf::CondBranchOp>(termOp)) {
     // Block should be one of the two destinations of the conditional branch

@@ -699,7 +699,7 @@ BufferOp FeedForwardNetworkRewriter::buildSplitNetwork(
                   std::back_inserter(branches));

   auto *findRes = llvm::find_if(branches, [](auto br) {
-    return br.getDataOperand().getType().template isa<NoneType>();
+    return llvm::isa<NoneType>(br.getDataOperand().getType());
   });

   assert(findRes && "expected one branch for the ctrl signal");

@@ -1212,7 +1212,7 @@ struct BlockControlTerm {
   BlockControlTerm(Operation *op, Value ctrlOperand)
       : op(op), ctrlOperand(ctrlOperand) {
     assert(op && ctrlOperand);
-    assert(ctrlOperand.getType().isa<NoneType>() &&
+    assert(isa<NoneType>(ctrlOperand.getType()) &&
           "Control operand must be a NoneType");
   }

@@ -1515,10 +1515,10 @@ HandshakeLowering::connectToMemory(ConversionPatternRewriter &rewriter,

   // A memory is external if the memref that defines it is provided as a
   // function (block) argument.
-  bool isExternalMemory = memrefOperand.isa<BlockArgument>();
+  bool isExternalMemory = isa<BlockArgument>(memrefOperand);

   mlir::MemRefType memrefType =
-      memrefOperand.getType().cast<mlir::MemRefType>();
+      cast<mlir::MemRefType>(memrefOperand.getType());
   if (failed(isValidMemrefType(memrefOperand.getLoc(), memrefType)))
     return failure();

@@ -1650,7 +1650,7 @@ namespace {
 class HandshakeLoweringSSAStrategy : public SSAMaximizationStrategy {
   /// Filters out block arguments of type MemRefType
   bool maximizeArgument(BlockArgument arg) override {
-    return !arg.getType().isa<mlir::MemRefType>();
+    return !isa<mlir::MemRefType>(arg.getType());
   }

   /// Filters out allocation operations

@@ -45,7 +45,7 @@ static Type tupleToStruct(TupleType tuple) {
   mlir::SmallVector<hw::StructType::FieldInfo, 8> hwfields;
   for (auto [i, innerType] : llvm::enumerate(tuple)) {
     Type convertedInnerType = innerType;
-    if (auto tupleInnerType = innerType.dyn_cast<TupleType>())
+    if (auto tupleInnerType = dyn_cast<TupleType>(innerType))
       convertedInnerType = tupleToStruct(tupleInnerType);
     hwfields.push_back(
         {StringAttr::get(ctx, "field" + Twine(i)), convertedInnerType});

@@ -393,7 +393,7 @@ struct RTLBuilder {
     // Start the mux tree with zero value.
     auto dataType = inputs[0].getType();
     unsigned width =
-        dataType.isa<NoneType>() ? 0 : dataType.getIntOrFloatBitWidth();
+        isa<NoneType>(dataType) ? 0 : dataType.getIntOrFloatBitWidth();
     Value muxValue = constant(width, 0);

     // Iteratively chain together muxes from the high bit to the low bit.

@@ -413,9 +413,9 @@ struct RTLBuilder {
 };

 static bool isZeroWidthType(Type type) {
-  if (auto intType = type.dyn_cast<IntegerType>())
+  if (auto intType = dyn_cast<IntegerType>(type))
     return intType.getWidth() == 0;
-  return type.isa<NoneType>();
+  return isa<NoneType>(type);
 }

 static UnwrappedIO unwrapIO(Location loc, ValueRange operands,

@@ -785,7 +785,7 @@ public:

 } // namespace

-static bool isDCType(Type type) { return type.isa<TokenType, ValueType>(); }
+static bool isDCType(Type type) { return isa<TokenType, ValueType>(type); }

 /// Returns true if the given `op` is considered as legal - i.e. it does not
 /// contain any dc-typed values.

@@ -814,7 +814,7 @@ public:
     // Check whether this precondition is met, and if not, exit.
     auto walkRes = parent->walk([&](Operation *op) {
       for (auto res : op->getResults()) {
-        if (res.getType().isa<dc::TokenType, dc::ValueType>()) {
+        if (isa<dc::TokenType, dc::ValueType>(res.getType())) {
           if (res.use_empty()) {
             op->emitOpError() << "DCToHW: value " << res << " is unused.";
             return WalkResult::interrupt();

@@ -47,9 +47,9 @@ struct DCTuple {

 // Unpack a !dc.value<...> into a DCTuple.
 static DCTuple unpack(OpBuilder &b, Value v) {
-  if (v.getType().isa<dc::ValueType>())
+  if (isa<dc::ValueType>(v.getType()))
     return DCTuple(b.create<dc::UnpackOp>(v.getLoc(), v));
-  assert(v.getType().isa<dc::TokenType>() && "Expected a dc::TokenType");
+  assert(isa<dc::TokenType>(v.getType()) && "Expected a dc::TokenType");
   return DCTuple(v, {});
 }

@@ -63,7 +63,7 @@ class DCTypeConverter : public TypeConverter {
 public:
   DCTypeConverter() {
     addConversion([](Type type) -> Type {
-      if (type.isa<NoneType>())
+      if (isa<NoneType>(type))
         return dc::TokenType::get(type.getContext());
       return dc::ValueType::get(type.getContext(), type);
     });

@@ -78,12 +78,12 @@ public:
        return std::nullopt;

      // Materialize !dc.value<> -> !dc.token
-      if (resultType.isa<dc::TokenType>() &&
-          inputs.front().getType().isa<dc::ValueType>())
+      if (isa<dc::TokenType>(resultType) &&
+          isa<dc::ValueType>(inputs.front().getType()))
        return unpack(builder, inputs.front()).token;

      // Materialize !dc.token -> !dc.value<>
-      auto vt = resultType.dyn_cast<dc::ValueType>();
+      auto vt = dyn_cast<dc::ValueType>(resultType);
      if (vt && !vt.getInnerType())
        return pack(builder, inputs.front());

@@ -98,12 +98,12 @@ public:
        return std::nullopt;

      // Materialize !dc.value<> -> !dc.token
-      if (resultType.isa<dc::TokenType>() &&
-          inputs.front().getType().isa<dc::ValueType>())
+      if (isa<dc::TokenType>(resultType) &&
+          isa<dc::ValueType>(inputs.front().getType()))
        return unpack(builder, inputs.front()).token;

      // Materialize !dc.token -> !dc.value<>
-      auto vt = resultType.dyn_cast<dc::ValueType>();
+      auto vt = dyn_cast<dc::ValueType>(resultType);
      if (vt && !vt.getInnerType())
        return pack(builder, inputs.front());

@@ -238,7 +238,7 @@ public:

     // if the original op used `index` as the select operand type, we need to
     // index-cast the unpacked select operand
-    if (op.getIndex().getType().isa<IndexType>()) {
+    if (isa<IndexType>(op.getIndex().getType())) {
       selValue = rewriter.create<arith::IndexCastOp>(
           op.getLoc(), rewriter.getIndexType(), selValue);
       convertedOps->insert(selValue.getDefiningOp());

@@ -433,9 +433,9 @@ public:
     auto select = unpack(rewriter, adaptor.getSelectOperand());
     auto selectData = select.data;
     auto selectToken = select.token;
-    bool isIndexType = selectData.getType().isa<IndexType>();
+    bool isIndexType = isa<IndexType>(selectData.getType());

-    bool withData = !op.getResult().getType().isa<NoneType>();
+    bool withData = !isa<NoneType>(op.getResult().getType());

     llvm::SmallVector<DCTuple> inputs;
     for (auto input : adaptor.getDataOperands())

@@ -461,7 +461,7 @@ public:
       if (isIndexType) {
         cmpIndex = rewriter.create<arith::ConstantIndexOp>(op.getLoc(), i);
       } else {
-        size_t width = selectData.getType().cast<IntegerType>().getWidth();
+        size_t width = cast<IntegerType>(selectData.getType()).getWidth();
         cmpIndex = rewriter.create<arith::ConstantIntOp>(op.getLoc(), i, width);
       }
       auto inputSelected = rewriter.create<arith::CmpIOp>(

@@ -102,7 +102,7 @@ static std::string getCallName(Operation *op) {
 /// assume that opType itself is the data-carrying type.
 static Type getOperandDataType(Value op) {
   auto opType = op.getType();
-  if (auto channelType = opType.dyn_cast<esi::ChannelType>())
+  if (auto channelType = dyn_cast<esi::ChannelType>(opType))
     return channelType.getInner();
   return opType;
 }

@@ -111,7 +111,7 @@ static Type getOperandDataType(Value op) {
 static SmallVector<Type> filterNoneTypes(ArrayRef<Type> input) {
   SmallVector<Type> filterRes;
   llvm::copy_if(input, std::back_inserter(filterRes),
-                [](Type type) { return !type.isa<NoneType>(); });
+                [](Type type) { return !isa<NoneType>(type); });
   return filterRes;
 }

@@ -145,17 +145,17 @@ static std::string getTypeName(Location loc, Type type) {
   std::string typeName;
   // Builtin types
   if (type.isIntOrIndex()) {
-    if (auto indexType = type.dyn_cast<IndexType>())
+    if (auto indexType = dyn_cast<IndexType>(type))
       typeName += "_ui" + std::to_string(indexType.kInternalStorageBitWidth);
     else if (type.isSignedInteger())
       typeName += "_si" + std::to_string(type.getIntOrFloatBitWidth());
     else
       typeName += "_ui" + std::to_string(type.getIntOrFloatBitWidth());
-  } else if (auto tupleType = type.dyn_cast<TupleType>()) {
+  } else if (auto tupleType = dyn_cast<TupleType>(type)) {
     typeName += "_tuple";
     for (auto elementType : tupleType.getTypes())
       typeName += getTypeName(loc, elementType);
-  } else if (auto structType = type.dyn_cast<hw::StructType>()) {
+  } else if (auto structType = dyn_cast<hw::StructType>(type)) {
     typeName += "_struct";
     for (auto element : structType.getElements())
       typeName += "_" + element.name.str() + getTypeName(loc, element.type);

@@ -174,7 +174,7 @@ static std::string getSubModuleName(Operation *oldOp) {

   // Add value of the constant operation.
   if (auto constOp = dyn_cast<handshake::ConstantOp>(oldOp)) {
-    if (auto intAttr = constOp.getValue().dyn_cast<IntegerAttr>()) {
+    if (auto intAttr = dyn_cast<IntegerAttr>(constOp.getValue())) {
       auto intType = intAttr.getType();

       if (intType.isSignedInteger())

@@ -218,9 +218,9 @@ static std::string getSubModuleName(Operation *oldOp) {
     if (auto initValues = bufferOp.getInitValues()) {
       subModuleName += "_init";
       for (const Attribute e : *initValues) {
-        assert(e.isa<IntegerAttr>());
+        assert(isa<IntegerAttr>(e));
         subModuleName +=
-            "_" + std::to_string(e.dyn_cast<IntegerAttr>().getInt());
+            "_" + std::to_string(dyn_cast<IntegerAttr>(e).getInt());
       }
     }
   }

@@ -295,8 +295,8 @@ static LogicalResult convertExtMemoryOps(HWModuleOp mod) {
   // Gather memref ports to be converted.
   llvm::DenseMap<unsigned, Value> memrefPorts;
   for (auto [i, arg] : llvm::enumerate(mod.getBodyBlock()->getArguments())) {
-    auto channel = arg.getType().dyn_cast<esi::ChannelType>();
-    if (channel && channel.getInner().isa<MemRefType>())
+    auto channel = dyn_cast<esi::ChannelType>(arg.getType());
+    if (channel && isa<MemRefType>(channel.getInner()))
       memrefPorts[i] = arg;
   }

@@ -573,7 +573,7 @@ struct RTLBuilder {

   // Unpacks a hw.struct into a list of values.
   ValueRange unpack(Value value) {
-    auto structType = value.getType().cast<hw::StructType>();
+    auto structType = cast<hw::StructType>(value.getType());
     llvm::SmallVector<Type> innerTypes;
     structType.getInnerTypes(innerTypes);
     return b.create<hw::StructExplodeOp>(loc, innerTypes, value).getResults();

@@ -660,7 +660,7 @@ struct RTLBuilder {
     // Todo: clean up when handshake supports i0.
     auto dataType = inputs[0].getType();
     unsigned width =
-        dataType.isa<NoneType>() ? 0 : dataType.getIntOrFloatBitWidth();
+        isa<NoneType>(dataType) ? 0 : dataType.getIntOrFloatBitWidth();
     Value muxValue = constant(width, 0);

     // Iteratively chain together muxes from the high bit to the low bit.

@@ -19,7 +19,7 @@ using namespace dc;
 using namespace mlir;

 bool circt::dc::isI1ValueType(Type t) {
-  auto vt = t.dyn_cast<ValueType>();
+  auto vt = dyn_cast<ValueType>(t);
   if (!vt)
     return false;
   auto innerWidth = vt.getInnerType().getIntOrFloatBitWidth();

@@ -242,7 +242,7 @@ LogicalResult UnpackOp::inferReturnTypes(
     MLIRContext *context, std::optional<Location> loc, ValueRange operands,
     DictionaryAttr attrs, mlir::OpaqueProperties properties,
     mlir::RegionRange regions, SmallVectorImpl<Type> &results) {
-  auto inputType = operands.front().getType().cast<ValueType>();
+  auto inputType = cast<ValueType>(operands.front().getType());
   results.push_back(TokenType::get(context));
   results.push_back(inputType.getInnerType());
   return success();

@@ -331,7 +331,7 @@ FailureOr<SmallVector<int64_t>> BufferOp::getInitValueArray() {
   assert(getInitValues() && "initValues attribute not set");
   SmallVector<int64_t> values;
   for (auto value : getInitValuesAttr()) {
-    if (auto iValue = value.dyn_cast<IntegerAttr>()) {
+    if (auto iValue = dyn_cast<IntegerAttr>(value)) {
      values.push_back(iValue.getValue().getSExtValue());
    } else {
      return emitError() << "initValues attribute must be an array of integers";

@@ -362,7 +362,7 @@ LogicalResult ToESIOp::inferReturnTypes(
     DictionaryAttr attrs, mlir::OpaqueProperties properties,
     mlir::RegionRange regions, SmallVectorImpl<Type> &results) {
   Type channelEltType;
-  if (auto valueType = operands.front().getType().dyn_cast<ValueType>())
+  if (auto valueType = dyn_cast<ValueType>(operands.front().getType()))
     channelEltType = valueType.getInnerType();
   else {
     // dc.token => esi.channel<i0>

@@ -382,8 +382,8 @@ LogicalResult FromESIOp::inferReturnTypes(
     DictionaryAttr attrs, mlir::OpaqueProperties properties,
     mlir::RegionRange regions, SmallVectorImpl<Type> &results) {
   auto innerType =
-      operands.front().getType().cast<esi::ChannelType>().getInner();
-  if (auto intType = innerType.dyn_cast<IntegerType>(); intType.getWidth() == 0)
+      cast<esi::ChannelType>(operands.front().getType()).getInner();
+  if (auto intType = dyn_cast<IntegerType>(innerType); intType.getWidth() == 0)
     results.push_back(dc::TokenType::get(context));
   else
     results.push_back(dc::ValueType::get(context, innerType));

@@ -19,7 +19,7 @@ using namespace dc;
 using namespace mlir;

 static bool isDCTyped(Value v) {
-  return v.getType().isa<dc::TokenType, dc::ValueType>();
+  return isa<dc::TokenType, dc::ValueType>(v.getType());
 }

 static void replaceFirstUse(Operation *op, Value oldVal, Value newVal) {

@@ -33,7 +33,7 @@ static void replaceFirstUse(Operation *op, Value oldVal, Value newVal) {
 // Adds a sink to the provided token or value-typed Value `v`.
 static void insertSink(Value v, OpBuilder &rewriter) {
   rewriter.setInsertionPointAfterValue(v);
-  if (v.getType().isa<ValueType>()) {
+  if (isa<ValueType>(v.getType())) {
     // Unpack before sinking
     v = rewriter.create<UnpackOp>(v.getLoc(), v).getToken();
   }

@@ -49,7 +49,7 @@ static void insertFork(Value result, OpBuilder &rewriter) {
   for (auto &u : result.getUses())
     opsToProcess.push_back(u.getOwner());

-  bool isValue = result.getType().isa<ValueType>();
+  bool isValue = isa<ValueType>(result.getType());
   Value token = result;
   Value value;
   if (isValue) {

@@ -86,7 +86,7 @@ static LogicalResult verifyIndexWideEnough(Operation *op, Value indexVal,
   unsigned indexWidth;

   // Determine the bitwidth of the indexing value
-  if (auto integerType = indexType.dyn_cast<IntegerType>())
+  if (auto integerType = dyn_cast<IntegerType>(indexType))
     indexWidth = integerType.getWidth();
   else if (indexType.isIndex())
     indexWidth = IndexType::kInternalStorageBitWidth;

@@ -107,7 +107,7 @@ static LogicalResult verifyIndexWideEnough(Operation *op, Value indexVal,
 static bool isControlCheckTypeAndOperand(Type dataType, Value operand) {
   // The operation is a control operation if its operand data type is a
   // NoneType.
-  if (dataType.isa<NoneType>())
+  if (isa<NoneType>(dataType))
     return true;

   // Otherwise, the operation is a control operation if the operation's

@@ -396,7 +396,7 @@ MuxOp::inferReturnTypes(MLIRContext *context, std::optional<Location> location,
   return success();
 }

-bool MuxOp::isControl() { return getResult().getType().isa<NoneType>(); }
+bool MuxOp::isControl() { return isa<NoneType>(getResult().getType()); }

 std::string handshake::MuxOp::getOperandName(unsigned int idx) {
   return idx == 0 ? "select" : defaultOperandName(idx - 1);

@@ -520,7 +520,7 @@ LogicalResult FuncOp::verify() {
           << ".";

   if (llvm::any_of(portNames,
-                   [&](Attribute attr) { return !attr.isa<StringAttr>(); }))
+                   [&](Attribute attr) { return !isa<StringAttr>(attr); }))
     return emitOpError() << "expected all entries in attribute '" << attrName
                          << "' to be strings.";

@@ -533,7 +533,7 @@ LogicalResult FuncOp::verify() {

   // Verify that all memrefs have a corresponding extmemory operation
   for (auto arg : entryBlock.getArguments()) {
-    if (!arg.getType().isa<MemRefType>())
+    if (!isa<MemRefType>(arg.getType()))
       continue;
     if (arg.getUsers().empty() ||
         !isa<ExternalMemoryOp>(*arg.getUsers().begin()))

@@ -620,9 +620,8 @@ void handshake::FuncOp::resolveArgAndResNames() {
                           StringRef attrName) {
     for (auto fallbackName : llvm::enumerate(fallbackNames)) {
       if (actualNames.size() <= fallbackName.index())
-        addStringToStringArrayAttr(
-            builder, this->getOperation(), attrName,
-            fallbackName.value().template cast<StringAttr>());
+        addStringToStringArrayAttr(builder, this->getOperation(), attrName,
+                                   cast<StringAttr>(fallbackName.value()));
     }
   };
   resolveNames(fallbackArgNames, argNames, "argNames");

@@ -871,7 +870,7 @@ void SourceOp::print(OpAsmPrinter &p) {

 LogicalResult ConstantOp::verify() {
   // Verify that the type of the provided value is equal to the result type.
-  auto typedValue = getValue().dyn_cast<mlir::TypedAttr>();
+  auto typedValue = dyn_cast<mlir::TypedAttr>(getValue());
   if (!typedValue)
     return emitOpError("constant value must be a typed attribute; value is ")
           << getValue();

@@ -1045,13 +1044,13 @@ LogicalResult MemoryOp::verify() {
   }
   for (unsigned i = 0; i < opStCount; i++) {
     Type syncType = outputType[opLdCount + i];
-    if (!syncType.isa<NoneType>())
+    if (!isa<NoneType>(syncType))
       return emitOpError("data type for sync port for store port ")
             << i << ":" << syncType << " is not 'none'";
   }
   for (unsigned i = 0; i < opLdCount; i++) {
     Type syncType = outputType[opLdCount + opStCount + i];
-    if (!syncType.isa<NoneType>())
+    if (!isa<NoneType>(syncType))
       return emitOpError("data type for sync port for load port ")
             << i << ":" << syncType << " is not 'none'";
   }

@@ -1078,7 +1077,7 @@ void ExternalMemoryOp::build(OpBuilder &builder, OperationState &result,
   llvm::append_range(ops, inputs);
   result.addOperands(ops);

-  auto memrefType = memref.getType().cast<MemRefType>();
+  auto memrefType = cast<MemRefType>(memref.getType());

   // Data outputs (get their type from memref)
   result.types.append(ldCount, memrefType.getElementType());

@@ -1108,7 +1107,7 @@ void MemoryOp::build(OpBuilder &builder, OperationState &result,
                      bool lsq, int id, Value memref) {
   result.addOperands(operands);

-  auto memrefType = memref.getType().cast<MemRefType>();
+  auto memrefType = cast<MemRefType>(memref.getType());

   // Data outputs (get their type from memref)
   result.types.append(outputs, memrefType.getElementType());

@@ -1166,9 +1165,9 @@ bool handshake::MemoryOp::allocateMemory(
   mlir::Type elementType = type.getElementType();
   int width = elementType.getIntOrFloatBitWidth();
   for (int i = 0; i < allocationSize; i++) {
-    if (elementType.isa<mlir::IntegerType>()) {
+    if (isa<mlir::IntegerType>(elementType)) {
       store[ptr][i] = APInt(width, 0);
-    } else if (elementType.isa<mlir::FloatType>()) {
+    } else if (isa<mlir::FloatType>(elementType)) {
       store[ptr][i] = APFloat(0.0);
     } else {
       llvm_unreachable("Unknown result type!\n");

@@ -1207,7 +1206,7 @@ void handshake::LoadOp::build(OpBuilder &builder, OperationState &result,
   result.addOperands(indices);

   // Data type
-  auto memrefType = memref.getType().cast<MemRefType>();
+  auto memrefType = cast<MemRefType>(memref.getType());

   // Data output (from load to successor ops)
   result.types.push_back(memrefType.getElementType());

@@ -51,7 +51,7 @@ struct HandshakeRemoveBuffersPass
 static bool shouldBufferArgument(BlockArgument arg) {
   // At the moment, buffers only make sense on arguments which we know
   // will lower down to a handshake bundle.
-  return arg.getType().isIntOrFloat() || arg.getType().isa<NoneType>();
+  return arg.getType().isIntOrFloat() || isa<NoneType>(arg.getType());
 }

 static bool isUnbufferedChannel(Operation *definingOp, Operation *usingOp) {

@@ -51,7 +51,7 @@ struct HandshakeLegalizeMemrefsPass
       auto loc = copy.getLoc();
       auto src = copy.getSource();
       auto dst = copy.getTarget();
-      auto memrefType = src.getType().cast<MemRefType>();
+      auto memrefType = cast<MemRefType>(src.getType());
       if (!isUniDimensional(memrefType)) {
         llvm::errs() << "Cannot legalize multi-dimensional memref operation "
                      << copy

@@ -58,7 +58,7 @@ struct StoreNames {
 } // namespace

 static Type indexToMemAddr(Type t, MemRefType memRef) {
-  assert(t.isa<IndexType>() && "Expected index type");
+  assert(isa<IndexType>(t) && "Expected index type");
   auto shape = memRef.getShape();
   assert(shape.size() == 1 && "Expected 1D memref");
   unsigned addrWidth = llvm::Log2_64_Ceil(shape[0]);

@@ -67,13 +67,13 @@ static Type indexToMemAddr(Type t, MemRefType memRef) {

 static HandshakeMemType getMemTypeForExtmem(Value v) {
   auto *ctx = v.getContext();
-  assert(v.getType().isa<mlir::MemRefType>() && "Value is not a memref type");
+  assert(isa<mlir::MemRefType>(v.getType()) && "Value is not a memref type");
   auto extmemOp = cast<handshake::ExternalMemoryOp>(*v.getUsers().begin());
   HandshakeMemType memType;
   llvm::SmallVector<hw::detail::FieldInfo> inFields, outFields;

   // Add memory type.
-  memType.memRefType = v.getType().cast<MemRefType>();
+  memType.memRefType = cast<MemRefType>(v.getType());
   memType.loadPorts = extmemOp.getLdCount();
   memType.storePorts = extmemOp.getStCount();

@@ -295,7 +295,7 @@ LogicalResult HandshakeLowerExtmemToHWPass::wrapESI(
 // lowering.
 static Value truncateToMemoryWidth(Location loc, OpBuilder &b, Value v,
                                    MemRefType memRefType) {
-  assert(v.getType().isa<IndexType>() && "Expected an index-typed value");
+  assert(isa<IndexType>(v.getType()) && "Expected an index-typed value");
   auto addrWidth = llvm::Log2_64_Ceil(memRefType.getShape().front());
   return b.create<arith::IndexCastOp>(loc, b.getIntegerType(addrWidth), v);
 }

@@ -330,7 +330,7 @@ static Value plumbStorePort(Location loc, OpBuilder &b,
       truncateToMemoryWidth(loc, b, stif.addressIn, memrefType), stif.dataIn};

   return b
-      .create<hw::StructCreateOp>(loc, outType.cast<hw::StructType>(),
+      .create<hw::StructCreateOp>(loc, cast<hw::StructType>(outType),
                                   structArgs)
       .getResult();
 }

@@ -377,7 +377,7 @@ HandshakeLowerExtmemToHWPass::lowerExtmemToHW(handshake::FuncOp func) {
   // iterated from lo to hi indices.
   std::map<unsigned, Value> memrefArgs;
   for (auto [i, arg] : llvm::enumerate(func.getArguments()))
-    if (arg.getType().isa<MemRefType>())
+    if (isa<MemRefType>(arg.getType()))
       memrefArgs[i] = arg;

   if (memrefArgs.empty())

@@ -403,7 +403,7 @@ HandshakeLowerExtmemToHWPass::lowerExtmemToHW(handshake::FuncOp func) {

     // Add memory input - this is the output of the extmemory op.
     auto memIOTypes = getMemTypeForExtmem(arg);
-    MemRefType memrefType = arg.getType().cast<MemRefType>();
+    MemRefType memrefType = cast<MemRefType>(arg.getType());

     auto oldReturnOp =
         cast<handshake::ReturnOp>(func.getBody().front().getTerminator());

@@ -143,7 +143,7 @@ static Type tupleToStruct(TupleType tuple) {
   mlir::SmallVector<hw::StructType::FieldInfo, 8> hwfields;
   for (auto [i, innerType] : llvm::enumerate(tuple)) {
     Type convertedInnerType = innerType;
-    if (auto tupleInnerType = innerType.dyn_cast<TupleType>())
+    if (auto tupleInnerType = dyn_cast<TupleType>(innerType))
       convertedInnerType = tupleToStruct(tupleInnerType);
     hwfields.push_back({StringAttr::get(ctx, "field" + std::to_string(i)),
                         convertedInnerType});

@@ -91,7 +91,7 @@ Any readValueWithType(mlir::Type type, std::stringstream &arg) {
     int64_t width = INDEX_WIDTH;
     APInt aparg(width, x);
     return aparg;
-  } else if (type.isa<mlir::IntegerType>()) {
+  } else if (isa<mlir::IntegerType>(type)) {
     int64_t x;
     arg >> x;
     int64_t width = type.getIntOrFloatBitWidth();

@@ -107,7 +107,7 @@ Any readValueWithType(mlir::Type type, std::stringstream &arg) {
     arg >> x;
     APFloat aparg(x);
     return aparg;
-  } else if (auto tupleType = type.dyn_cast<TupleType>()) {
+  } else if (auto tupleType = dyn_cast<TupleType>(type)) {
    char tmp;
    arg >> tmp;
    assert(tmp == '(' && "tuple should start with '('");

@@ -138,13 +138,13 @@ Any readValueWithType(mlir::Type type, std::string in) {

 void printAnyValueWithType(llvm::raw_ostream &out, mlir::Type type,
                            Any &value) {
-  if (type.isa<mlir::IntegerType>() || type.isa<mlir::IndexType>()) {
+  if (isa<mlir::IntegerType>(type) || isa<mlir::IndexType>(type)) {
     out << any_cast<APInt>(value).getSExtValue();
-  } else if (type.isa<mlir::FloatType>()) {
+  } else if (isa<mlir::FloatType>(type)) {
     out << any_cast<APFloat>(value).convertToDouble();
-  } else if (type.isa<mlir::NoneType>()) {
+  } else if (isa<mlir::NoneType>(type)) {
     out << "none";
-  } else if (auto tupleType = type.dyn_cast<mlir::TupleType>()) {
+  } else if (auto tupleType = dyn_cast<mlir::TupleType>(type)) {
     auto values = any_cast<std::vector<llvm::Any>>(value);
     out << "(";
     llvm::interleaveComma(llvm::zip(tupleType.getTypes(), values), out,

@@ -198,9 +198,9 @@ unsigned allocateMemRef(mlir::MemRefType type, std::vector<Any> &in,
   mlir::Type elementType = type.getElementType();
   int64_t width = elementType.getIntOrFloatBitWidth();
   for (int i = 0; i < allocationSize; ++i) {
-    if (elementType.isa<mlir::IntegerType>()) {
+    if (isa<mlir::IntegerType>(elementType)) {
       store[ptr][i] = APInt(width, 0);
-    } else if (elementType.isa<mlir::FloatType>()) {
+    } else if (isa<mlir::FloatType>(elementType)) {
       store[ptr][i] = APFloat(0.0);
     } else {
       fatalValueError("Unknown result type!\n", elementType);

@@ -625,7 +625,7 @@ LogicalResult HandshakeExecuter::execute(handshake::InstanceOp instanceOp,
   // Execute the instance op and create associations in the current
   // scope's value and time maps for the returned values.

-  if (auto funcSym = instanceOp->getAttr("module").cast<SymbolRefAttr>()) {
+  if (auto funcSym = cast<SymbolRefAttr>(instanceOp->getAttr("module"))) {
     if (handshake::FuncOp func =
             (*module)->lookupSymbol<handshake::FuncOp>(funcSym)) {
       /// Prepare an InstanceOp for execution by creating a valueMap

@@ -992,9 +992,9 @@ bool simulate(StringRef toplevelFunction, ArrayRef<std::string> inputArgs,

   for (unsigned i = 0; i < realInputs; ++i) {
     mlir::Type type = ftype.getInput(i);
-    if (type.isa<mlir::MemRefType>()) {
+    if (isa<mlir::MemRefType>(type)) {
       // We require this memref type to be fully specified.
-      auto memreftype = type.dyn_cast<mlir::MemRefType>();
+      auto memreftype = dyn_cast<mlir::MemRefType>(type);
       std::vector<Any> nothing;
       std::string x;
       unsigned buffer = allocateMemRef(memreftype, nothing, store, storeTimes);

@@ -1041,9 +1041,9 @@ bool simulate(StringRef toplevelFunction, ArrayRef<std::string> inputArgs,
   // Go back through the arguments and output any memrefs.
   for (unsigned i = 0; i < realInputs; ++i) {
     mlir::Type type = ftype.getInput(i);
-    if (type.isa<mlir::MemRefType>()) {
+    if (isa<mlir::MemRefType>(type)) {
      // We require this memref type to be fully specified.
-      auto memreftype = type.dyn_cast<mlir::MemRefType>();
+      auto memreftype = dyn_cast<mlir::MemRefType>(type);
      unsigned buffer = any_cast<unsigned>(valueMap[blockArgs[i]]);
      auto elementType = memreftype.getElementType();
      for (int j = 0; j < memreftype.getNumElements(); ++j) {