NFC: Remove trivial builder get methods.

These methods don't add any value, and some are even more restrictive than the corresponding static 'get' methods.

PiperOrigin-RevId: 275391240
Author: River Riddle, 2019-10-17 20:08:01 -07:00 (committed by A. Unique TensorFlower)
Parent: 575405f4d6
Commit: 2acc220f17
38 changed files with 128 additions and 223 deletions
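
The change is mechanical at every call site: instead of going through a trivial Builder wrapper, callers invoke the static 'get' method on the type or attribute class directly. A minimal sketch of the pattern, using the Toy-dialect tensor types that appear throughout this diff (header names reflect the source layout at the time of this commit and are assumptions, not part of the diff):

#include "mlir/IR/Builders.h"
#include "mlir/IR/StandardTypes.h" // RankedTensorType / UnrankedTensorType lived here circa 2019

static void buildToyTensorTypes(mlir::Builder *builder) {
  // Before: auto dataType = builder->getTensorType({}, builder->getF64Type());
  auto dataType = mlir::RankedTensorType::get({}, builder->getF64Type());

  // Before: builder->getTensorType(builder->getF64Type());
  auto resultType = mlir::UnrankedTensorType::get(builder->getF64Type());

  // Attributes follow the same pattern, e.g. AffineMapAttr::get(map) and
  // TypeAttr::get(type) replace builder->getAffineMapAttr(map) and
  // builder->getTypeAttr(type).
  (void)dataType;
  (void)resultType;
}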

View File

@ -50,7 +50,7 @@ ToyDialect::ToyDialect(mlir::MLIRContext *ctx) : mlir::Dialect("toy", ctx) {
/// expected to fill in order to build the operation.
static void buildConstantOp(mlir::Builder *builder,
mlir::OperationState &result, double value) {
auto dataType = builder->getTensorType({}, builder->getF64Type());
auto dataType = RankedTensorType::get({}, builder->getF64Type());
auto dataAttribute = DenseElementsAttr::get(dataType, value);
ConstantOp::build(builder, result, dataType, dataAttribute);
}
@ -88,7 +88,7 @@ static mlir::LogicalResult verify(ConstantOp op) {
static void buildAddOp(mlir::Builder *builder, mlir::OperationState &result,
mlir::Value *lhs, mlir::Value *rhs) {
result.addTypes(builder->getTensorType(builder->getF64Type()));
result.addTypes(UnrankedTensorType::get(builder->getF64Type()));
result.addOperands({lhs, rhs});
}
@ -96,14 +96,14 @@ static void buildGenericCallOp(mlir::Builder *builder,
mlir::OperationState &result, StringRef callee,
ArrayRef<mlir::Value *> arguments) {
// Generic call always returns an unranked Tensor initially.
result.addTypes(builder->getTensorType(builder->getF64Type()));
result.addTypes(UnrankedTensorType::get(builder->getF64Type()));
result.addOperands(arguments);
result.addAttribute("callee", builder->getSymbolRefAttr(callee));
}
static void buildMulOp(mlir::Builder *builder, mlir::OperationState &result,
mlir::Value *lhs, mlir::Value *rhs) {
result.addTypes(builder->getTensorType(builder->getF64Type()));
result.addTypes(UnrankedTensorType::get(builder->getF64Type()));
result.addOperands({lhs, rhs});
}
@ -144,7 +144,7 @@ static mlir::LogicalResult verify(ReturnOp op) {
static void buildTransposeOp(mlir::Builder *builder,
mlir::OperationState &result, mlir::Value *value) {
result.addTypes(builder->getTensorType(builder->getF64Type()));
result.addTypes(UnrankedTensorType::get(builder->getF64Type()));
result.addOperands(value);
}

View File

@ -282,7 +282,7 @@ private:
// The type of this attribute is tensor of 64-bit floating-point with the
// shape of the literal.
mlir::Type elementType = builder.getF64Type();
auto dataType = builder.getTensorType(lit.getDims(), elementType);
auto dataType = mlir::RankedTensorType::get(lit.getDims(), elementType);
// This is the actual attribute that holds the list of values for this
// tensor literal.
@ -443,10 +443,10 @@ private:
mlir::Type getType(ArrayRef<int64_t> shape) {
// If the shape is empty, then this type is unranked.
if (shape.empty())
return builder.getTensorType(builder.getF64Type());
return mlir::UnrankedTensorType::get(builder.getF64Type());
// Otherwise, we use the given shape.
return builder.getTensorType(shape, builder.getF64Type());
return mlir::RankedTensorType::get(shape, builder.getF64Type());
}
/// Build an MLIR type from a Toy AST variable type (forward to the generic

View File

@ -50,7 +50,7 @@ ToyDialect::ToyDialect(mlir::MLIRContext *ctx) : mlir::Dialect("toy", ctx) {
/// expected to fill in order to build the operation.
static void buildConstantOp(mlir::Builder *builder, mlir::OperationState &state,
double value) {
auto dataType = builder->getTensorType({}, builder->getF64Type());
auto dataType = RankedTensorType::get({}, builder->getF64Type());
auto dataAttribute = DenseElementsAttr::get(dataType, value);
ConstantOp::build(builder, state, dataType, dataAttribute);
}
@ -88,7 +88,7 @@ static mlir::LogicalResult verify(ConstantOp op) {
static void buildAddOp(mlir::Builder *builder, mlir::OperationState &state,
mlir::Value *lhs, mlir::Value *rhs) {
state.addTypes(builder->getTensorType(builder->getF64Type()));
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands({lhs, rhs});
}
@ -96,14 +96,14 @@ static void buildGenericCallOp(mlir::Builder *builder,
mlir::OperationState &state, StringRef callee,
ArrayRef<mlir::Value *> arguments) {
// Generic call always returns an unranked Tensor initially.
state.addTypes(builder->getTensorType(builder->getF64Type()));
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands(arguments);
state.addAttribute("callee", builder->getSymbolRefAttr(callee));
}
static void buildMulOp(mlir::Builder *builder, mlir::OperationState &state,
mlir::Value *lhs, mlir::Value *rhs) {
state.addTypes(builder->getTensorType(builder->getF64Type()));
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands({lhs, rhs});
}
@ -144,7 +144,7 @@ static mlir::LogicalResult verify(ReturnOp op) {
static void buildTransposeOp(mlir::Builder *builder,
mlir::OperationState &state, mlir::Value *value) {
state.addTypes(builder->getTensorType(builder->getF64Type()));
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands(value);
}

View File

@ -282,7 +282,7 @@ private:
// The type of this attribute is tensor of 64-bit floating-point with the
// shape of the literal.
mlir::Type elementType = builder.getF64Type();
auto dataType = builder.getTensorType(lit.getDims(), elementType);
auto dataType = mlir::RankedTensorType::get(lit.getDims(), elementType);
// This is the actual attribute that holds the list of values for this
// tensor literal.
@ -443,10 +443,10 @@ private:
mlir::Type getType(ArrayRef<int64_t> shape) {
// If the shape is empty, then this type is unranked.
if (shape.empty())
return builder.getTensorType(builder.getF64Type());
return mlir::UnrankedTensorType::get(builder.getF64Type());
// Otherwise, we use the given shape.
return builder.getTensorType(shape, builder.getF64Type());
return mlir::RankedTensorType::get(shape, builder.getF64Type());
}
/// Build an MLIR type from a Toy AST variable type (forward to the generic

View File

@ -100,7 +100,7 @@ ToyDialect::ToyDialect(mlir::MLIRContext *ctx) : mlir::Dialect("toy", ctx) {
/// expected to fill in order to build the operation.
static void buildConstantOp(mlir::Builder *builder, mlir::OperationState &state,
double value) {
auto dataType = builder->getTensorType({}, builder->getF64Type());
auto dataType = RankedTensorType::get({}, builder->getF64Type());
auto dataAttribute = DenseElementsAttr::get(dataType, value);
ConstantOp::build(builder, state, dataType, dataAttribute);
}
@ -142,7 +142,7 @@ static mlir::LogicalResult verify(ConstantOp op) {
static void buildAddOp(mlir::Builder *builder, mlir::OperationState &state,
mlir::Value *lhs, mlir::Value *rhs) {
state.addTypes(builder->getTensorType(builder->getF64Type()));
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands({lhs, rhs});
}
@ -154,7 +154,7 @@ static void buildGenericCallOp(mlir::Builder *builder,
mlir::OperationState &state, StringRef callee,
ArrayRef<mlir::Value *> arguments) {
// Generic call always returns an unranked Tensor initially.
state.addTypes(builder->getTensorType(builder->getF64Type()));
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands(arguments);
state.addAttribute("callee", builder->getSymbolRefAttr(callee));
}
@ -171,7 +171,7 @@ Operation::operand_range GenericCallOp::getArgOperands() { return inputs(); }
static void buildMulOp(mlir::Builder *builder, mlir::OperationState &state,
mlir::Value *lhs, mlir::Value *rhs) {
state.addTypes(builder->getTensorType(builder->getF64Type()));
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands({lhs, rhs});
}
@ -235,7 +235,7 @@ static mlir::LogicalResult verify(ReturnOp op) {
static void buildTransposeOp(mlir::Builder *builder,
mlir::OperationState &state, mlir::Value *value) {
state.addTypes(builder->getTensorType(builder->getF64Type()));
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands(value);
}

View File

@ -282,7 +282,7 @@ private:
// The type of this attribute is tensor of 64-bit floating-point with the
// shape of the literal.
mlir::Type elementType = builder.getF64Type();
auto dataType = builder.getTensorType(lit.getDims(), elementType);
auto dataType = mlir::RankedTensorType::get(lit.getDims(), elementType);
// This is the actual attribute that holds the list of values for this
// tensor literal.
@ -443,10 +443,10 @@ private:
mlir::Type getType(ArrayRef<int64_t> shape) {
// If the shape is empty, then this type is unranked.
if (shape.empty())
return builder.getTensorType(builder.getF64Type());
return mlir::UnrankedTensorType::get(builder.getF64Type());
// Otherwise, we use the given shape.
return builder.getTensorType(shape, builder.getF64Type());
return mlir::RankedTensorType::get(shape, builder.getF64Type());
}
/// Build an MLIR type from a Toy AST variable type (forward to the generic

View File

@ -100,7 +100,7 @@ ToyDialect::ToyDialect(mlir::MLIRContext *ctx) : mlir::Dialect("toy", ctx) {
/// expected to fill in order to build the operation.
static void buildConstantOp(mlir::Builder *builder, mlir::OperationState &state,
double value) {
auto dataType = builder->getTensorType({}, builder->getF64Type());
auto dataType = RankedTensorType::get({}, builder->getF64Type());
auto dataAttribute = DenseElementsAttr::get(dataType, value);
ConstantOp::build(builder, state, dataType, dataAttribute);
}
@ -142,7 +142,7 @@ static mlir::LogicalResult verify(ConstantOp op) {
static void buildAddOp(mlir::Builder *builder, mlir::OperationState &state,
mlir::Value *lhs, mlir::Value *rhs) {
state.addTypes(builder->getTensorType(builder->getF64Type()));
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands({lhs, rhs});
}
@ -154,7 +154,7 @@ static void buildGenericCallOp(mlir::Builder *builder,
mlir::OperationState &state, StringRef callee,
ArrayRef<mlir::Value *> arguments) {
// Generic call always returns an unranked Tensor initially.
state.addTypes(builder->getTensorType(builder->getF64Type()));
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands(arguments);
state.addAttribute("callee", builder->getSymbolRefAttr(callee));
}
@ -171,7 +171,7 @@ Operation::operand_range GenericCallOp::getArgOperands() { return inputs(); }
static void buildMulOp(mlir::Builder *builder, mlir::OperationState &state,
mlir::Value *lhs, mlir::Value *rhs) {
state.addTypes(builder->getTensorType(builder->getF64Type()));
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands({lhs, rhs});
}
@ -235,7 +235,7 @@ static mlir::LogicalResult verify(ReturnOp op) {
static void buildTransposeOp(mlir::Builder *builder,
mlir::OperationState &state, mlir::Value *value) {
state.addTypes(builder->getTensorType(builder->getF64Type()));
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands(value);
}

View File

@ -282,7 +282,7 @@ private:
// The type of this attribute is tensor of 64-bit floating-point with the
// shape of the literal.
mlir::Type elementType = builder.getF64Type();
auto dataType = builder.getTensorType(lit.getDims(), elementType);
auto dataType = mlir::RankedTensorType::get(lit.getDims(), elementType);
// This is the actual attribute that holds the list of values for this
// tensor literal.
@ -443,10 +443,10 @@ private:
mlir::Type getType(ArrayRef<int64_t> shape) {
// If the shape is empty, then this type is unranked.
if (shape.empty())
return builder.getTensorType(builder.getF64Type());
return mlir::UnrankedTensorType::get(builder.getF64Type());
// Otherwise, we use the given shape.
return builder.getTensorType(shape, builder.getF64Type());
return mlir::RankedTensorType::get(shape, builder.getF64Type());
}
/// Build an MLIR type from a Toy AST variable type (forward to the generic

View File

@ -100,7 +100,7 @@ ToyDialect::ToyDialect(mlir::MLIRContext *ctx) : mlir::Dialect("toy", ctx) {
/// expected to fill in order to build the operation.
static void buildConstantOp(mlir::Builder *builder, mlir::OperationState &state,
double value) {
auto dataType = builder->getTensorType({}, builder->getF64Type());
auto dataType = RankedTensorType::get({}, builder->getF64Type());
auto dataAttribute = DenseElementsAttr::get(dataType, value);
ConstantOp::build(builder, state, dataType, dataAttribute);
}
@ -142,7 +142,7 @@ static mlir::LogicalResult verify(ConstantOp op) {
static void buildAddOp(mlir::Builder *builder, mlir::OperationState &state,
mlir::Value *lhs, mlir::Value *rhs) {
state.addTypes(builder->getTensorType(builder->getF64Type()));
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands({lhs, rhs});
}
@ -154,7 +154,7 @@ static void buildGenericCallOp(mlir::Builder *builder,
mlir::OperationState &state, StringRef callee,
ArrayRef<mlir::Value *> arguments) {
// Generic call always returns an unranked Tensor initially.
state.addTypes(builder->getTensorType(builder->getF64Type()));
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands(arguments);
state.addAttribute("callee", builder->getSymbolRefAttr(callee));
}
@ -171,7 +171,7 @@ Operation::operand_range GenericCallOp::getArgOperands() { return inputs(); }
static void buildMulOp(mlir::Builder *builder, mlir::OperationState &state,
mlir::Value *lhs, mlir::Value *rhs) {
state.addTypes(builder->getTensorType(builder->getF64Type()));
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands({lhs, rhs});
}
@ -235,7 +235,7 @@ static mlir::LogicalResult verify(ReturnOp op) {
static void buildTransposeOp(mlir::Builder *builder,
mlir::OperationState &state, mlir::Value *value) {
state.addTypes(builder->getTensorType(builder->getF64Type()));
state.addTypes(UnrankedTensorType::get(builder->getF64Type()));
state.addOperands(value);
}

View File

@ -282,7 +282,7 @@ private:
// The type of this attribute is tensor of 64-bit floating-point with the
// shape of the literal.
mlir::Type elementType = builder.getF64Type();
auto dataType = builder.getTensorType(lit.getDims(), elementType);
auto dataType = mlir::RankedTensorType::get(lit.getDims(), elementType);
// This is the actual attribute that holds the list of values for this
// tensor literal.
@ -443,10 +443,10 @@ private:
mlir::Type getType(ArrayRef<int64_t> shape) {
// If the shape is empty, then this type is unranked.
if (shape.empty())
return builder.getTensorType(builder.getF64Type());
return mlir::UnrankedTensorType::get(builder.getF64Type());
// Otherwise, we use the given shape.
return builder.getTensorType(shape, builder.getF64Type());
return mlir::RankedTensorType::get(shape, builder.getF64Type());
}
/// Build an MLIR type from a Toy AST variable type (forward to the generic

View File

@ -135,11 +135,10 @@ private:
case spirv::BuiltIn::LocalInvocationId:
case spirv::BuiltIn::GlobalInvocationId: {
auto ptrType = spirv::PointerType::get(
builder.getVectorType({3}, builder.getIntegerType(32)),
VectorType::get({3}, builder.getIntegerType(32)),
spirv::StorageClass::Input);
newVarOp = builder.create<spirv::GlobalVariableOp>(
loc, builder.getTypeAttr(ptrType), builder.getStringAttr(name),
nullptr);
loc, TypeAttr::get(ptrType), builder.getStringAttr(name), nullptr);
newVarOp.setAttr(
convertToSnakeCase(stringifyDecoration(spirv::Decoration::BuiltIn)),
builder.getStringAttr(stringifyBuiltIn(builtin)));

View File

@ -33,7 +33,7 @@ def AffineMapAttr : Attr<
CPred<"$_self.isa<AffineMapAttr>()">, "AffineMap attribute"> {
let storageType = [{ AffineMapAttr }];
let returnType = [{ AffineMap }];
let constBuilderCall = "$_builder.getAffineMapAttr($0)";
let constBuilderCall = "AffineMapAttr::get($0)";
}
def AffineMapArrayAttr : TypedArrayAttrBase<AffineMapAttr,

View File

@ -1067,7 +1067,7 @@ def TensorLoadOp : Std_Op<"tensor_load",
let builders = [OpBuilder<
"Builder *builder, OperationState &result, Value *memref", [{
auto memrefType = memref->getType().cast<MemRefType>();
auto resultType = builder->getTensorType(memrefType.getShape(),
auto resultType = RankedTensorType::get(memrefType.getShape(),
memrefType.getElementType());
result.addOperands(memref);
result.addTypes(resultType);

View File

@ -80,12 +80,6 @@ public:
IntegerType getI1Type();
IntegerType getIntegerType(unsigned width);
FunctionType getFunctionType(ArrayRef<Type> inputs, ArrayRef<Type> results);
MemRefType getMemRefType(ArrayRef<int64_t> shape, Type elementType,
ArrayRef<AffineMap> affineMapComposition = {},
unsigned memorySpace = 0);
VectorType getVectorType(ArrayRef<int64_t> shape, Type elementType);
RankedTensorType getTensorType(ArrayRef<int64_t> shape, Type elementType);
UnrankedTensorType getTensorType(Type elementType);
TupleType getTupleType(ArrayRef<Type> elementTypes);
NoneType getNoneType();
@ -105,22 +99,10 @@ public:
FloatAttr getFloatAttr(Type type, double value);
FloatAttr getFloatAttr(Type type, const APFloat &value);
StringAttr getStringAttr(StringRef bytes);
StringAttr getStringAttr(StringRef bytes, Type type);
ArrayAttr getArrayAttr(ArrayRef<Attribute> value);
AffineMapAttr getAffineMapAttr(AffineMap map);
IntegerSetAttr getIntegerSetAttr(IntegerSet set);
TypeAttr getTypeAttr(Type type);
SymbolRefAttr getSymbolRefAttr(Operation *value);
SymbolRefAttr getSymbolRefAttr(StringRef value);
ElementsAttr getDenseElementsAttr(ShapedType type,
ArrayRef<Attribute> values);
ElementsAttr getDenseIntElementsAttr(ShapedType type,
ArrayRef<int64_t> values);
ElementsAttr getSparseElementsAttr(ShapedType type,
DenseIntElementsAttr indices,
DenseElementsAttr values);
ElementsAttr getOpaqueElementsAttr(Dialect *dialect, ShapedType type,
StringRef bytes);
// Returns a 0-valued attribute of the given `type`. This function only
// supports boolean, integer, and 16-/32-/64-bit float types, and vector or
// ranked tensor of them. Returns null attribute otherwise.
@ -149,9 +131,6 @@ public:
AffineExpr getAffineSymbolExpr(unsigned position);
AffineExpr getAffineConstantExpr(int64_t constant);
AffineMap getAffineMap(unsigned dimCount, unsigned symbolCount,
ArrayRef<AffineExpr> results);
// Special cases of affine maps and integer sets
/// Returns a zero result affine map with no dimensions or symbols: () -> ().
AffineMap getEmptyAffineMap();
@ -175,11 +154,6 @@ public:
/// returns: (d0, d1)[s0] -> (d0 + 2, d1 + s0 + 2)
AffineMap getShiftedAffineMap(AffineMap map, int64_t shift);
// Integer set.
IntegerSet getIntegerSet(unsigned dimCount, unsigned symbolCount,
ArrayRef<AffineExpr> constraints,
ArrayRef<bool> isEq);
// TODO: Helpers for affine map/exprs, etc.
protected:
MLIRContext *context;
};
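
Each declaration removed above was a one-line forwarder to the corresponding static 'get' method (the deleted bodies appear in the Builders.cpp hunks further down), so the migration is a direct one-to-one substitution at call sites. An illustrative sketch; the header names and sample arguments are assumed for the example rather than taken from the diff:

#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/IntegerSet.h"
#include "mlir/IR/StandardTypes.h"

static void migratedCallSites(mlir::Builder &b, mlir::AffineMap map,
                              mlir::IntegerSet set) {
  auto f32 = b.getF32Type();
  auto memref = mlir::MemRefType::get({2, 4}, f32);               // was b.getMemRefType(...)
  auto vector = mlir::VectorType::get({3}, b.getIntegerType(32)); // was b.getVectorType(...)
  auto ranked = mlir::RankedTensorType::get({2}, f32);            // was b.getTensorType(shape, elTy)
  auto unranked = mlir::UnrankedTensorType::get(f32);             // was b.getTensorType(elTy)
  auto mapAttr = mlir::AffineMapAttr::get(map);                   // was b.getAffineMapAttr(map)
  auto setAttr = mlir::IntegerSetAttr::get(set);                  // was b.getIntegerSetAttr(set)
  auto typeAttr = mlir::TypeAttr::get(memref);                    // was b.getTypeAttr(type)
  (void)vector; (void)ranked; (void)unranked;
  (void)mapAttr; (void)setAttr; (void)typeAttr;
}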

View File

@ -970,7 +970,7 @@ class IntElementsAttr<int width> : ElementsAttrBase<
// Note that this is only constructing scalar elements attribute.
let constBuilderCall = "DenseElementsAttr::get("
"$_builder.getTensorType({}, $_builder.getIntegerType(" # width # ")), "
"RankedTensorType::get({}, $_builder.getIntegerType(" # width # ")), "
"llvm::makeArrayRef($0)).cast<DenseIntElementsAttr>()";
let convertFromStorage = "$_self";
}
@ -989,7 +989,7 @@ class FloatElementsAttr<int width> : ElementsAttrBase<
// Note that this is only constructing scalar elements attribute.
let constBuilderCall = "DenseElementsAttr::get("
"$_builder.getTensorType({}, $_builder.getF" # width # "Type()),"
"RankedTensorType::get({}, $_builder.getF" # width # "Type()),"
"llvm::makeArrayRef($0))";
let convertFromStorage = "$_self";
}
@ -1013,7 +1013,7 @@ class RankedFloatElementsAttr<int width, list<int> dims> : ElementsAttrBase<
let returnType = [{ DenseFPElementsAttr }];
let constBuilderCall = "DenseElementsAttr::get("
"$_builder.getTensorType({" # StrJoinInt<dims>.result #
"RankedTensorType::get({" # StrJoinInt<dims>.result #
"}, $_builder.getF" # width # "Type()), "
"llvm::makeArrayRef($0)).cast<DenseFPElementsAttr>()";
let convertFromStorage = "$_self";

View File

@ -77,7 +77,7 @@ void mlir::buildTripCountMapAndOperands(
SmallVector<AffineExpr, 4> lbSplatExpr(ubValueMap.getNumResults(),
lbMap.getResult(0));
auto lbMapSplat =
b.getAffineMap(lbMap.getNumDims(), lbMap.getNumSymbols(), lbSplatExpr);
AffineMap::get(lbMap.getNumDims(), lbMap.getNumSymbols(), lbSplatExpr);
AffineValueMap lbSplatValueMap(lbMapSplat, lbOperands);
AffineValueMap tripCountValueMap;

View File

@ -132,7 +132,7 @@ static Value *createAndLoadGlobalVarForEntryFnArg(PatternRewriter &rewriter,
funcOp.getName().str() + "_arg_" + std::to_string(origArgNum);
var = rewriter.create<spirv::GlobalVariableOp>(
funcOp.getLoc(),
rewriter.getTypeAttr(getGlobalVarTypeForEntryFnArg(origArg->getType())),
TypeAttr::get(getGlobalVarTypeForEntryFnArg(origArg->getType())),
rewriter.getStringAttr(varName), nullptr);
var.setAttr(
spirv::SPIRVDialect::getAttributeName(spirv::Decoration::DescriptorSet),

View File

@ -198,7 +198,7 @@ void AffineApplyOp::build(Builder *builder, OperationState &result,
AffineMap map, ArrayRef<Value *> operands) {
result.addOperands(operands);
result.types.append(map.getNumResults(), builder->getIndexType());
result.addAttribute("map", builder->getAffineMapAttr(map));
result.addAttribute("map", AffineMapAttr::get(map));
}
ParseResult AffineApplyOp::parse(OpAsmParser &parser, OperationState &result) {
@ -817,13 +817,13 @@ void AffineDmaStartOp::build(Builder *builder, OperationState &result,
ArrayRef<Value *> tagIndices, Value *numElements,
Value *stride, Value *elementsPerStride) {
result.addOperands(srcMemRef);
result.addAttribute(getSrcMapAttrName(), builder->getAffineMapAttr(srcMap));
result.addAttribute(getSrcMapAttrName(), AffineMapAttr::get(srcMap));
result.addOperands(srcIndices);
result.addOperands(destMemRef);
result.addAttribute(getDstMapAttrName(), builder->getAffineMapAttr(dstMap));
result.addAttribute(getDstMapAttrName(), AffineMapAttr::get(dstMap));
result.addOperands(destIndices);
result.addOperands(tagMemRef);
result.addAttribute(getTagMapAttrName(), builder->getAffineMapAttr(tagMap));
result.addAttribute(getTagMapAttrName(), AffineMapAttr::get(tagMap));
result.addOperands(tagIndices);
result.addOperands(numElements);
if (stride) {
@ -985,7 +985,7 @@ void AffineDmaWaitOp::build(Builder *builder, OperationState &result,
Value *tagMemRef, AffineMap tagMap,
ArrayRef<Value *> tagIndices, Value *numElements) {
result.addOperands(tagMemRef);
result.addAttribute(getTagMapAttrName(), builder->getAffineMapAttr(tagMap));
result.addAttribute(getTagMapAttrName(), AffineMapAttr::get(tagMap));
result.addOperands(tagIndices);
result.addOperands(numElements);
}
@ -1073,13 +1073,11 @@ void AffineForOp::build(Builder *builder, OperationState &result,
builder->getIntegerAttr(builder->getIndexType(), step));
// Add the lower bound.
result.addAttribute(getLowerBoundAttrName(),
builder->getAffineMapAttr(lbMap));
result.addAttribute(getLowerBoundAttrName(), AffineMapAttr::get(lbMap));
result.addOperands(lbOperands);
// Add the upper bound.
result.addAttribute(getUpperBoundAttrName(),
builder->getAffineMapAttr(ubMap));
result.addAttribute(getUpperBoundAttrName(), AffineMapAttr::get(ubMap));
result.addOperands(ubOperands);
// Create a region and a block for the body. The argument of the region is
@ -1164,7 +1162,7 @@ static ParseResult parseBound(bool isLower, OperationState &result,
// for storage. Analysis passes may expand it into a multi-dimensional map
// if desired.
AffineMap map = builder.getSymbolIdentityMap();
result.addAttribute(boundAttrName, builder.getAffineMapAttr(map));
result.addAttribute(boundAttrName, AffineMapAttr::get(map));
return success();
}
@ -1213,8 +1211,8 @@ static ParseResult parseBound(bool isLower, OperationState &result,
if (auto integerAttr = boundAttr.dyn_cast<IntegerAttr>()) {
result.attributes.pop_back();
result.addAttribute(
boundAttrName, builder.getAffineMapAttr(
builder.getConstantAffineMap(integerAttr.getInt())));
boundAttrName,
AffineMapAttr::get(builder.getConstantAffineMap(integerAttr.getInt())));
return success();
}
@ -1752,7 +1750,7 @@ void AffineLoadOp::build(Builder *builder, OperationState &result,
assert(operands.size() == 1 + map.getNumInputs() && "inconsistent operands");
result.addOperands(operands);
if (map)
result.addAttribute(getMapAttrName(), builder->getAffineMapAttr(map));
result.addAttribute(getMapAttrName(), AffineMapAttr::get(map));
auto memrefType = operands[0]->getType().cast<MemRefType>();
result.types.push_back(memrefType.getElementType());
}
@ -1764,7 +1762,7 @@ void AffineLoadOp::build(Builder *builder, OperationState &result,
result.addOperands(memref);
result.addOperands(mapOperands);
auto memrefType = memref->getType().cast<MemRefType>();
result.addAttribute(getMapAttrName(), builder->getAffineMapAttr(map));
result.addAttribute(getMapAttrName(), AffineMapAttr::get(map));
result.types.push_back(memrefType.getElementType());
}
@ -1855,7 +1853,7 @@ void AffineStoreOp::build(Builder *builder, OperationState &result,
result.addOperands(valueToStore);
result.addOperands(memref);
result.addOperands(mapOperands);
result.addAttribute(getMapAttrName(), builder->getAffineMapAttr(map));
result.addAttribute(getMapAttrName(), AffineMapAttr::get(map));
}
// Use identity map.

View File

@ -869,7 +869,7 @@ void GlobalOp::build(Builder *builder, OperationState &result, LLVMType type,
ArrayRef<NamedAttribute> attrs) {
result.addAttribute(SymbolTable::getSymbolAttrName(),
builder->getStringAttr(name));
result.addAttribute("type", builder->getTypeAttr(type));
result.addAttribute("type", TypeAttr::get(type));
if (isConstant)
result.addAttribute("constant", builder->getUnitAttr());
if (value)
@ -939,7 +939,7 @@ static ParseResult parseGlobalOp(OpAsmParser &parser, OperationState &result) {
}
}
result.addAttribute("type", parser.getBuilder().getTypeAttr(types[0]));
result.addAttribute("type", TypeAttr::get(types[0]));
return success();
}
@ -1026,7 +1026,7 @@ void LLVMFuncOp::build(Builder *builder, OperationState &result, StringRef name,
result.addRegion();
result.addAttribute(SymbolTable::getSymbolAttrName(),
builder->getStringAttr(name));
result.addAttribute("type", builder->getTypeAttr(type));
result.addAttribute("type", TypeAttr::get(type));
result.attributes.append(attrs.begin(), attrs.end());
if (argAttrs.empty())
return;

View File

@ -1244,7 +1244,7 @@ static ParseResult parseGlobalVariableOp(OpAsmParser &parser,
if (!type.isa<spirv::PointerType>()) {
return parser.emitError(loc, "expected spv.ptr type");
}
state.addAttribute(kTypeAttrName, parser.getBuilder().getTypeAttr(type));
state.addAttribute(kTypeAttrName, TypeAttr::get(type));
return success();
}

View File

@ -954,8 +954,8 @@ LogicalResult Deserializer::processGlobalVariable(ArrayRef<uint32_t> operands) {
<< wordIndex << " of " << operands.size() << " processed";
}
auto varOp = opBuilder.create<spirv::GlobalVariableOp>(
unknownLoc, opBuilder.getTypeAttr(type),
opBuilder.getStringAttr(variableName), initializer);
unknownLoc, TypeAttr::get(type), opBuilder.getStringAttr(variableName),
initializer);
// Decorations.
if (decorations.count(variableID)) {
@ -1065,7 +1065,7 @@ LogicalResult Deserializer::processType(spirv::Opcode opcode,
return emitError(unknownLoc, "OpTypeVector references undefined <id> ")
<< operands[1];
}
typeMap[operands[0]] = opBuilder.getVectorType({operands[2]}, elementTy);
typeMap[operands[0]] = VectorType::get({operands[2]}, elementTy);
} break;
case spirv::Opcode::OpTypePointer: {
if (operands.size() != 3) {
@ -1391,7 +1391,7 @@ Deserializer::processConstantComposite(ArrayRef<uint32_t> operands) {
auto resultID = operands[1];
if (auto vectorType = resultType.dyn_cast<VectorType>()) {
auto attr = opBuilder.getDenseElementsAttr(vectorType, elements);
auto attr = DenseElementsAttr::get(vectorType, elements);
// For normal constants, we just record the attribute (and its type) for
// later materialization at use sites.
constantMap.try_emplace(resultID, attr, resultType);

View File

@ -58,7 +58,7 @@ public:
}
rewriter.replaceOpWithNewOp<spirv::GlobalVariableOp>(
op, rewriter.getTypeAttr(decoratedType), globalVarAttrs);
op, TypeAttr::get(decoratedType), globalVarAttrs);
return matchSuccess();
}
};

View File

@ -653,11 +653,11 @@ static Type getCheckedI1SameShape(Builder *build, Type type) {
if (type.isIntOrIndexOrFloat())
return i1Type;
if (auto tensorType = type.dyn_cast<RankedTensorType>())
return build->getTensorType(tensorType.getShape(), i1Type);
return RankedTensorType::get(tensorType.getShape(), i1Type);
if (type.isa<UnrankedTensorType>())
return build->getTensorType(i1Type);
return UnrankedTensorType::get(i1Type);
if (auto vectorType = type.dyn_cast<VectorType>())
return build->getVectorType(vectorType.getShape(), i1Type);
return VectorType::get(vectorType.getShape(), i1Type);
return Type();
}
@ -2241,7 +2241,7 @@ OpFoldResult TensorCastOp::fold(ArrayRef<Attribute> operands) {
static Type getTensorTypeFromMemRefType(Builder &b, Type type) {
if (auto memref = type.dyn_cast<MemRefType>())
return b.getTensorType(memref.getShape(), memref.getElementType());
return RankedTensorType::get(memref.getShape(), memref.getElementType());
return b.getNoneType();
}

View File

@ -206,7 +206,7 @@ void VectorTransferReadOp::build(Builder *builder, OperationState &result,
result.addOperands({*paddingValue});
}
result.addAttribute(getPermutationMapAttrName(),
builder->getAffineMapAttr(permutationMap));
AffineMapAttr::get(permutationMap));
result.addTypes(vectorType);
}
@ -383,7 +383,7 @@ void VectorTransferWriteOp::build(Builder *builder, OperationState &result,
result.addOperands({srcVector, dstMemRef});
result.addOperands(dstIndices);
result.addAttribute(getPermutationMapAttrName(),
builder->getAffineMapAttr(permutationMap));
AffineMapAttr::get(permutationMap));
}
auto VectorTransferWriteOp::getIndices() -> operand_range {

View File

@ -72,25 +72,6 @@ FunctionType Builder::getFunctionType(ArrayRef<Type> inputs,
return FunctionType::get(inputs, results, context);
}
MemRefType Builder::getMemRefType(ArrayRef<int64_t> shape, Type elementType,
ArrayRef<AffineMap> affineMapComposition,
unsigned memorySpace) {
return MemRefType::get(shape, elementType, affineMapComposition, memorySpace);
}
VectorType Builder::getVectorType(ArrayRef<int64_t> shape, Type elementType) {
return VectorType::get(shape, elementType);
}
RankedTensorType Builder::getTensorType(ArrayRef<int64_t> shape,
Type elementType) {
return RankedTensorType::get(shape, elementType);
}
UnrankedTensorType Builder::getTensorType(Type elementType) {
return UnrankedTensorType::get(elementType);
}
TupleType Builder::getTupleType(ArrayRef<Type> elementTypes) {
return TupleType::get(elementTypes, context);
}
@ -165,24 +146,10 @@ StringAttr Builder::getStringAttr(StringRef bytes) {
return StringAttr::get(bytes, context);
}
StringAttr Builder::getStringAttr(StringRef bytes, Type type) {
return StringAttr::get(bytes, type);
}
ArrayAttr Builder::getArrayAttr(ArrayRef<Attribute> value) {
return ArrayAttr::get(value, context);
}
AffineMapAttr Builder::getAffineMapAttr(AffineMap map) {
return AffineMapAttr::get(map);
}
IntegerSetAttr Builder::getIntegerSetAttr(IntegerSet set) {
return IntegerSetAttr::get(set);
}
TypeAttr Builder::getTypeAttr(Type type) { return TypeAttr::get(type); }
SymbolRefAttr Builder::getSymbolRefAttr(Operation *value) {
auto symName =
value->getAttrOfType<StringAttr>(SymbolTable::getSymbolAttrName());
@ -193,27 +160,6 @@ SymbolRefAttr Builder::getSymbolRefAttr(StringRef value) {
return SymbolRefAttr::get(value, getContext());
}
ElementsAttr Builder::getDenseElementsAttr(ShapedType type,
ArrayRef<Attribute> values) {
return DenseElementsAttr::get(type, values);
}
ElementsAttr Builder::getDenseIntElementsAttr(ShapedType type,
ArrayRef<int64_t> values) {
return DenseIntElementsAttr::get(type, values);
}
ElementsAttr Builder::getSparseElementsAttr(ShapedType type,
DenseIntElementsAttr indices,
DenseElementsAttr values) {
return SparseElementsAttr::get(type, indices, values);
}
ElementsAttr Builder::getOpaqueElementsAttr(Dialect *dialect, ShapedType type,
StringRef bytes) {
return OpaqueElementsAttr::get(dialect, type, bytes);
}
ArrayAttr Builder::getI32ArrayAttr(ArrayRef<int32_t> values) {
auto attrs = functional::map(
[this](int32_t v) -> Attribute { return getI32IntegerAttr(v); }, values);
@ -255,7 +201,7 @@ ArrayAttr Builder::getStrArrayAttr(ArrayRef<StringRef> values) {
ArrayAttr Builder::getAffineMapArrayAttr(ArrayRef<AffineMap> values) {
auto attrs = functional::map(
[this](AffineMap v) -> Attribute { return getAffineMapAttr(v); }, values);
[](AffineMap v) -> Attribute { return AffineMapAttr::get(v); }, values);
return getArrayAttr(attrs);
}
@ -278,7 +224,7 @@ Attribute Builder::getZeroAttr(Type type) {
auto element = getZeroAttr(vtType.getElementType());
if (!element)
return {};
return getDenseElementsAttr(vtType, element);
return DenseElementsAttr::get(vtType, element);
}
default:
break;
@ -290,11 +236,6 @@ Attribute Builder::getZeroAttr(Type type) {
// Affine Expressions, Affine Maps, and Integet Sets.
//===----------------------------------------------------------------------===//
AffineMap Builder::getAffineMap(unsigned dimCount, unsigned symbolCount,
ArrayRef<AffineExpr> results) {
return AffineMap::get(dimCount, symbolCount, results);
}
AffineExpr Builder::getAffineDimExpr(unsigned position) {
return mlir::getAffineDimExpr(position, context);
}
@ -307,12 +248,6 @@ AffineExpr Builder::getAffineConstantExpr(int64_t constant) {
return mlir::getAffineConstantExpr(constant, context);
}
IntegerSet Builder::getIntegerSet(unsigned dimCount, unsigned symbolCount,
ArrayRef<AffineExpr> constraints,
ArrayRef<bool> isEq) {
return IntegerSet::get(dimCount, symbolCount, constraints, isEq);
}
AffineMap Builder::getEmptyAffineMap() { return AffineMap::get(context); }
AffineMap Builder::getConstantAffineMap(int64_t val) {
@ -347,9 +282,8 @@ AffineMap Builder::getSingleDimShiftAffineMap(int64_t shift) {
AffineMap Builder::getShiftedAffineMap(AffineMap map, int64_t shift) {
SmallVector<AffineExpr, 4> shiftedResults;
shiftedResults.reserve(map.getNumResults());
for (auto resultExpr : map.getResults()) {
for (auto resultExpr : map.getResults())
shiftedResults.push_back(resultExpr + shift);
}
return AffineMap::get(map.getNumDims(), map.getNumSymbols(), shiftedResults);
}

View File

@ -57,7 +57,7 @@ void FuncOp::build(Builder *builder, OperationState &result, StringRef name,
FunctionType type, ArrayRef<NamedAttribute> attrs) {
result.addAttribute(SymbolTable::getSymbolAttrName(),
builder->getStringAttr(name));
result.addAttribute(getTypeAttrName(), builder->getTypeAttr(type));
result.addAttribute(getTypeAttrName(), TypeAttr::get(type));
result.attributes.append(attrs.begin(), attrs.end());
result.addRegion();
}

View File

@ -133,7 +133,7 @@ mlir::impl::parseFunctionLikeOp(OpAsmParser &parser, OperationState &result,
std::string errorMessage;
if (auto type = funcTypeBuilder(builder, argTypes, results,
impl::VariadicFlag(isVariadic), errorMessage))
result.addAttribute(getTypeAttrName(), builder.getTypeAttr(type));
result.addAttribute(getTypeAttrName(), TypeAttr::get(type));
else
return parser.emitError(signatureLocation)
<< "failed to construct function type"

View File

@ -1063,9 +1063,9 @@ Attribute Parser::parseAttribute(Type type) {
if (parseAffineMapOrIntegerSetReference(map, set))
return nullptr;
if (map)
return builder.getAffineMapAttr(map);
return AffineMapAttr::get(map);
assert(set);
return builder.getIntegerSetAttr(set);
return IntegerSetAttr::get(set);
}
// Parse an array attribute.
@ -1164,7 +1164,7 @@ Attribute Parser::parseAttribute(Type type) {
default:
// Parse a type attribute.
if (Type type = parseType())
return builder.getTypeAttr(type);
return TypeAttr::get(type);
return nullptr;
}
}
@ -1381,7 +1381,7 @@ Attribute Parser::parseOpaqueElementsAttr() {
if (!type)
return nullptr;
return builder.getOpaqueElementsAttr(dialect, type, llvm::fromHex(val));
return OpaqueElementsAttr::get(dialect, type, llvm::fromHex(val));
}
namespace {
@ -2496,8 +2496,8 @@ ParseResult AffineParser::parseAffineMapOfSSAIds(AffineMap &map) {
if (exprs.empty())
map = AffineMap();
else
map = builder.getAffineMap(numDimOperands,
dimsAndSymbols.size() - numDimOperands, exprs);
map = AffineMap::get(numDimOperands, dimsAndSymbols.size() - numDimOperands,
exprs);
return success();
}
@ -2525,7 +2525,7 @@ AffineMap AffineParser::parseAffineMapRange(unsigned numDims,
return AffineMap();
// Parsed a valid affine map.
return builder.getAffineMap(numDims, numSymbols, exprs);
return AffineMap::get(numDims, numSymbols, exprs);
}
/// Parse an affine constraint.
@ -2600,11 +2600,11 @@ IntegerSet AffineParser::parseIntegerSetConstraints(unsigned numDims,
if (constraints.empty()) {
/* 0 == 0 */
auto zero = getAffineConstantExpr(0, getContext());
return builder.getIntegerSet(numDims, numSymbols, zero, true);
return IntegerSet::get(numDims, numSymbols, zero, true);
}
// Parsed a valid integer set.
return builder.getIntegerSet(numDims, numSymbols, constraints, isEqs);
return IntegerSet::get(numDims, numSymbols, constraints, isEqs);
}
/// Parse an ambiguous reference to either and affine map or an integer set.
@ -3715,7 +3715,7 @@ public:
return failure();
// Add AffineMap attribute.
if (map) {
mapAttr = parser.builder.getAffineMapAttr(map);
mapAttr = AffineMapAttr::get(map);
attrs.push_back(parser.builder.getNamedAttr(attrName, mapAttr));
}

View File

@ -81,7 +81,7 @@ void AddDefaultStatsPass::runWithConfig(SolverContext &solverContext,
APFloat minValue(-1.0f);
APFloat maxValue(1.0f);
ElementsAttr layerStats = DenseFPElementsAttr::get(
b.getTensorType({2}, b.getF32Type()), {minValue, maxValue});
RankedTensorType::get({2}, b.getF32Type()), {minValue, maxValue});
auto statsOp = b.create<StatisticsOp>(func.getLoc(), arg, layerStats,
nullptr, nullptr);
arg->replaceAllUsesWith(statsOp);
@ -107,7 +107,7 @@ void AddDefaultStatsPass::runWithConfig(SolverContext &solverContext,
APFloat minValue(-1.0f);
APFloat maxValue(1.0f);
ElementsAttr layerStats = DenseFPElementsAttr::get(
b.getTensorType({2}, b.getF32Type()), {minValue, maxValue});
RankedTensorType::get({2}, b.getF32Type()), {minValue, maxValue});
auto statsOp = b.create<StatisticsOp>(op->getLoc(), op->getResult(0),
layerStats, nullptr, nullptr);
originalResult->replaceAllUsesWith(statsOp);

View File

@ -953,8 +953,8 @@ static Value *createPrivateMemRef(AffineForOp forOp, Operation *srcStoreOpInst,
} else {
newMemSpace = oldMemRefType.getMemorySpace();
}
auto newMemRefType = top.getMemRefType(
newShape, oldMemRefType.getElementType(), {}, newMemSpace);
auto newMemRefType = MemRefType::get(newShape, oldMemRefType.getElementType(),
{}, newMemSpace);
// Gather alloc operands for the dynamic dimensions of the memref.
SmallVector<Value *, 4> allocOperands;
unsigned dynamicDimCount = 0;
@ -988,7 +988,7 @@ static Value *createPrivateMemRef(AffineForOp forOp, Operation *srcStoreOpInst,
}
auto indexRemap = zeroOffsetCount == rank
? AffineMap()
: b.getAffineMap(outerIVs.size() + rank, 0, remapExprs);
: AffineMap::get(outerIVs.size() + rank, 0, remapExprs);
// Replace all users of 'oldMemRef' with 'newMemRef'.
LogicalResult res =
replaceAllMemRefUsesWith(oldMemRef, newMemRef, {}, indexRemap,

View File

@ -168,13 +168,13 @@ constructTiledIndexSetHyperRect(MutableArrayRef<AffineForOp> origLoops,
boundExprs.push_back(dim + tileSizes[i]);
boundExprs.append(origUbMap.getResults().begin(),
origUbMap.getResults().end());
auto ubMap = b.getAffineMap(origUbMap.getNumDims() + 1,
auto ubMap = AffineMap::get(origUbMap.getNumDims() + 1,
origUbMap.getNumSymbols(), boundExprs);
newLoops[width + i].setUpperBound(/*operands=*/ubOperands, ubMap);
} else {
// No need of the min expression.
auto dim = b.getAffineDimExpr(0);
auto ubMap = b.getAffineMap(1, 0, dim + tileSizes[i]);
auto ubMap = AffineMap::get(1, 0, dim + tileSizes[i]);
newLoops[width + i].setUpperBound(newLoops[i].getInductionVar(), ubMap);
}
}

View File

@ -223,7 +223,7 @@ LogicalResult mlir::loopUnrollJamByFactor(AffineForOp forOp,
if (!forOpIV->use_empty()) {
// iv' = iv + i, i = 1 to unrollJamFactor-1.
auto d0 = builder.getAffineDimExpr(0);
auto bumpMap = builder.getAffineMap(1, 0, {d0 + i * step});
auto bumpMap = AffineMap::get(1, 0, {d0 + i * step});
auto ivUnroll =
builder.create<AffineApplyOp>(forInst->getLoc(), bumpMap, forOpIV);
operandMapping.map(forOpIV, ivUnroll);

View File

@ -82,7 +82,7 @@ static bool doubleBuffer(Value *oldMemRef, AffineForOp forOp) {
newShape[0] = 2;
std::copy(oldShape.begin(), oldShape.end(), newShape.begin() + 1);
auto newMemRefType =
bInner.getMemRefType(newShape, oldMemRefType.getElementType(), {},
MemRefType::get(newShape, oldMemRefType.getElementType(), {},
oldMemRefType.getMemorySpace());
return newMemRefType;
};
@ -109,7 +109,7 @@ static bool doubleBuffer(Value *oldMemRef, AffineForOp forOp) {
// Create 'iv mod 2' value to index the leading dimension.
auto d0 = bInner.getAffineDimExpr(0);
int64_t step = forOp.getStep();
auto modTwoMap = bInner.getAffineMap(/*dimCount=*/1, /*symbolCount=*/0,
auto modTwoMap = AffineMap::get(/*dimCount=*/1, /*symbolCount=*/0,
{d0.floorDiv(step) % 2});
auto ivModTwoOp = bInner.create<AffineApplyOp>(forOp.getLoc(), modTwoMap,
forOp.getInductionVar());

View File

@ -87,7 +87,7 @@ void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
for (unsigned i = 0, e = tripCountMap.getNumResults(); i < e; i++) {
auto tripCountExpr = tripCountMap.getResult(i);
bumpExprs[i] = (tripCountExpr - tripCountExpr % unrollFactor) * step;
auto bumpMap = b.getAffineMap(tripCountMap.getNumDims(),
auto bumpMap = AffineMap::get(tripCountMap.getNumDims(),
tripCountMap.getNumSymbols(), bumpExprs[i]);
bumpValues[i] =
b.create<AffineApplyOp>(forOp.getLoc(), bumpMap, tripCountOperands);
@ -100,7 +100,7 @@ void mlir::getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
operands->clear();
operands->push_back(lb);
operands->append(bumpValues.begin(), bumpValues.end());
*map = b.getAffineMap(1 + tripCountMap.getNumResults(), 0, newUbExprs);
*map = AffineMap::get(1 + tripCountMap.getNumResults(), 0, newUbExprs);
// Simplify the map + operands.
fullyComposeAffineMapAndOperands(map, operands);
*map = simplifyAffineMap(*map);
@ -487,7 +487,7 @@ LogicalResult mlir::loopUnrollByFactor(AffineForOp forOp,
if (!forOpIV->use_empty()) {
// iv' = iv + 1/2/3...unrollFactor-1;
auto d0 = builder.getAffineDimExpr(0);
auto bumpMap = builder.getAffineMap(1, 0, {d0 + i * step});
auto bumpMap = AffineMap::get(1, 0, {d0 + i * step});
auto ivUnroll =
builder.create<AffineApplyOp>(forOp.getLoc(), bumpMap, forOpIV);
operandMap.map(forOpIV, ivUnroll);
@ -676,7 +676,7 @@ static void augmentMapAndBounds(OpBuilder &b, Value *iv, AffineMap *map,
auto bounds = llvm::to_vector<4>(map->getResults());
bounds.push_back(b.getAffineDimExpr(map->getNumDims()) + offset);
operands->insert(operands->begin() + map->getNumDims(), iv);
*map = b.getAffineMap(map->getNumDims() + 1, map->getNumSymbols(), bounds);
*map = AffineMap::get(map->getNumDims() + 1, map->getNumSymbols(), bounds);
canonicalizeMapAndOperands(map, operands);
}
@ -1229,7 +1229,7 @@ static AffineForOp generatePointWiseCopy(Location loc, Value *memref,
? memIndicesStart[d]
: b.create<AffineApplyOp>(
loc,
b.getAffineMap(memAffineMap.getNumDims(),
AffineMap::get(memAffineMap.getNumDims(),
memAffineMap.getNumSymbols(),
memAffineMap.getResult(d)),
memIndicesStart);
@ -1238,7 +1238,7 @@ static AffineForOp generatePointWiseCopy(Location loc, Value *memref,
SmallVector<Value *, 2> operands = {memBase, forOp.getInductionVar()};
auto memIndex = b.create<AffineApplyOp>(
loc,
b.getAffineMap(2, 0, b.getAffineDimExpr(0) + b.getAffineDimExpr(1)),
AffineMap::get(2, 0, b.getAffineDimExpr(0) + b.getAffineDimExpr(1)),
operands);
memIndices.push_back(memIndex);
}
@ -1381,7 +1381,7 @@ static LogicalResult generateCopy(
} else {
// The coordinate for the start location is just the lower bound along the
// corresponding dimension on the memory region (stored in 'offset').
auto map = top.getAffineMap(
auto map = AffineMap::get(
cst->getNumDimIds() + cst->getNumSymbolIds() - rank, 0, offset);
memIndices.push_back(b.create<AffineApplyOp>(loc, map, regionSymbols));
}
@ -1401,7 +1401,7 @@ static LogicalResult generateCopy(
if (!existingBuf) {
AffineMap fastBufferLayout = b.getMultiDimIdentityMap(rank);
auto fastMemRefType =
top.getMemRefType(fastBufferShape, memRefType.getElementType(),
MemRefType::get(fastBufferShape, memRefType.getElementType(),
fastBufferLayout, copyOptions.fastMemorySpace);
// Create the fast memory space buffer just before the 'affine.for'
@ -1470,7 +1470,7 @@ static LogicalResult generateCopy(
} else {
// DMA generation.
// Create a tag (single element 1-d memref) for the DMA.
auto tagMemRefType = top.getMemRefType({1}, top.getIntegerType(32), {},
auto tagMemRefType = MemRefType::get({1}, top.getIntegerType(32), {},
copyOptions.tagMemorySpace);
auto tagMemRef = prologue.create<AllocOp>(loc, tagMemRefType);
@ -1532,7 +1532,7 @@ static LogicalResult generateCopy(
auto dimExpr = b.getAffineDimExpr(regionSymbols.size() + i);
remapExprs.push_back(dimExpr - offsets[i]);
}
auto indexRemap = b.getAffineMap(regionSymbols.size() + rank, 0, remapExprs);
auto indexRemap = AffineMap::get(regionSymbols.size() + rank, 0, remapExprs);
// Record the begin since it may be invalidated by memref replacement.
Block::iterator prevOfBegin;

View File

@ -119,8 +119,8 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
oldMemRefOperands.reserve(oldMemRefRank);
if (oldMap != builder.getMultiDimIdentityMap(oldMap.getNumDims())) {
for (auto resultExpr : oldMap.getResults()) {
auto singleResMap = builder.getAffineMap(
oldMap.getNumDims(), oldMap.getNumSymbols(), resultExpr);
auto singleResMap = AffineMap::get(oldMap.getNumDims(),
oldMap.getNumSymbols(), resultExpr);
auto afOp = builder.create<AffineApplyOp>(op->getLoc(), singleResMap,
oldMapOperands);
oldMemRefOperands.push_back(afOp);
@ -147,7 +147,7 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
indexRemap != builder.getMultiDimIdentityMap(indexRemap.getNumDims())) {
// Remapped indices.
for (auto resultExpr : indexRemap.getResults()) {
auto singleResMap = builder.getAffineMap(
auto singleResMap = AffineMap::get(
indexRemap.getNumDims(), indexRemap.getNumSymbols(), resultExpr);
auto afOp = builder.create<AffineApplyOp>(op->getLoc(), singleResMap,
remapOperands);
@ -210,7 +210,7 @@ LogicalResult mlir::replaceAllMemRefUsesWith(Value *oldMemRef, Value *newMemRef,
state.types.push_back(result->getType());
// Add attribute for 'newMap', other Attributes do not change.
auto newMapAttr = builder.getAffineMapAttr(newMap);
auto newMapAttr = AffineMapAttr::get(newMap);
for (auto namedAttr : op->getAttrs()) {
if (namedAttr.first == oldMapAttrPair.first) {
state.attributes.push_back({namedAttr.first, newMapAttr});
@ -371,8 +371,8 @@ void mlir::createAffineComputationSlice(
// Create an affine.apply for each of the map results.
sliceOps->reserve(composedMap.getNumResults());
for (auto resultExpr : composedMap.getResults()) {
auto singleResMap = builder.getAffineMap(
composedMap.getNumDims(), composedMap.getNumSymbols(), resultExpr);
auto singleResMap = AffineMap::get(composedMap.getNumDims(),
composedMap.getNumSymbols(), resultExpr);
sliceOps->push_back(builder.create<AffineApplyOp>(
opInst->getLoc(), singleResMap, composedOpOperands));
}
@ -457,7 +457,7 @@ LogicalResult mlir::normalizeMemRef(AllocOp allocOp) {
auto *oldMemRef = allocOp.getResult();
SmallVector<Value *, 4> symbolOperands(allocOp.getSymbolicOperands());
auto newMemRefType = b.getMemRefType(newShape, memrefType.getElementType(),
auto newMemRefType = MemRefType::get(newShape, memrefType.getElementType(),
b.getMultiDimIdentityMap(newRank));
auto newAlloc = b.create<AllocOp>(allocOp.getLoc(), newMemRefType);

View File

@ -772,7 +772,7 @@ static void computeMemoryOpIndices(Operation *op, AffineMap map,
OpBuilder builder(op);
for (auto resultExpr : map.getResults()) {
auto singleResMap =
builder.getAffineMap(map.getNumDims(), map.getNumSymbols(), resultExpr);
AffineMap::get(map.getNumDims(), map.getNumSymbols(), resultExpr);
auto afOp =
builder.create<AffineApplyOp>(op->getLoc(), singleResMap, mapOperands);
results.push_back(afOp);

View File

@ -772,7 +772,7 @@ TEST_FUNC(affine_if_op) {
builder.getAffineSymbolExpr(0), // s0 >= 0
builder.getAffineSymbolExpr(1) // s1 >= 0
};
auto intSet = builder.getIntegerSet(2, 2, affineExprs, isEq);
auto intSet = IntegerSet::get(2, 2, affineExprs, isEq);
SmallVector<Value *, 4> affineIfArgs = {zero, zero, ten, ten};
intrinsics::affine_if(intSet, affineIfArgs, /*withElseRegion=*/false);

View File

@ -71,7 +71,7 @@ protected:
OpBuilder opBuilder(module.body());
auto ptrType = spirv::PointerType::get(type, spirv::StorageClass::Uniform);
opBuilder.create<spirv::GlobalVariableOp>(
UnknownLoc::get(&context), opBuilder.getTypeAttr(ptrType),
UnknownLoc::get(&context), TypeAttr::get(ptrType),
opBuilder.getStringAttr(name), nullptr);
}