forked from OSchip/llvm-project

[mlir] Update accessors to prefixed form (NFC)

parent bf18253b0e
commit 62fea88bc5
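For context on the convention being adopted: mlir-tblgen generates a named accessor for every operand, attribute, and result declared in an op's ODS `arguments` and `results` lists, and this commit migrates call sites from the raw-name form (`value()`, `lhs()`, `caseOperands()`) to the `get`-prefixed form (`getValue()`, `getLhs()`, `getCaseOperands()`). A minimal usage sketch, using the `llvm.resume` op that appears in the hunks below (illustrative only; the accessors themselves are generated, not hand-written):

#include "mlir/Dialect/LLVMIR/LLVMDialect.h"

// LLVM_ResumeOp declares `(ins LLVM_Type:$value)`, so ODS generates an
// accessor for it; this commit switches callers from the raw spelling
// op.value() to the prefixed spelling op.getValue().
static mlir::Value resumeOperand(mlir::LLVM::ResumeOp op) {
  return op.getValue(); // previously written as op.value()
}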
@@ -601,8 +601,8 @@ def LLVM_ShuffleVectorOp : LLVM_Op<"shufflevector", [NoSideEffect]> {
     OpBuilder<(ins "Value":$v1, "Value":$v2, "ArrayAttr":$mask,
                CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>];
   let verifier = [{
-    auto type1 = v1().getType();
-    auto type2 = v2().getType();
+    auto type1 = getV1().getType();
+    auto type2 = getV2().getType();
     if (::mlir::LLVM::getVectorElementType(type1) !=
         ::mlir::LLVM::getVectorElementType(type2))
       return emitOpError("expected matching LLVM IR Dialect element types");

@@ -701,7 +701,7 @@ def LLVM_ResumeOp : LLVM_TerminatorOp<"resume", []> {
   let arguments = (ins LLVM_Type:$value);
   string llvmBuilder = [{ builder.CreateResume($value); }];
   let verifier = [{
-    if (!isa_and_nonnull<LandingpadOp>(value().getDefiningOp()))
+    if (!isa_and_nonnull<LandingpadOp>(getValue().getDefiningOp()))
       return emitOpError("expects landingpad value as operand");
     // No check for personality of function - landingpad op verifies it.
     return success();

@@ -753,13 +753,13 @@ def LLVM_SwitchOp : LLVM_TerminatorOp<"switch",
   let extraClassDeclaration = [{
     /// Return the operands for the case destination block at the given index.
     OperandRange getCaseOperands(unsigned index) {
-      return caseOperands()[index];
+      return getCaseOperands()[index];
     }

     /// Return a mutable range of operands for the case destination block at the
     /// given index.
     MutableOperandRange getCaseOperandsMutable(unsigned index) {
-      return caseOperandsMutable()[index];
+      return getCaseOperandsMutable()[index];
     }
   }];
 }

@@ -870,8 +870,8 @@ def LLVM_AddressOfOp : LLVM_Op<"mlir.addressof"> {
                    CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs),
     [{
       build($_builder, $_state,
-            LLVM::LLVMPointerType::get(global.getType(), global.addr_space()),
-            global.sym_name());
+            LLVM::LLVMPointerType::get(global.getType(), global.getAddrSpace()),
+            global.getSymName());
       $_state.addAttributes(attrs);
     }]>,
     OpBuilder<(ins "LLVMFuncOp":$func,

@@ -1126,7 +1126,7 @@ def LLVM_GlobalOp : LLVM_Op<"mlir.global",
     }
     /// Return the initializer attribute if it exists, or a null attribute.
     Attribute getValueOrNull() {
-      return value().getValueOr(Attribute());
+      return getValue().getValueOr(Attribute());
     }
     /// Return the initializer region. This may be empty, but if it is not it
    /// terminates in an `llvm.return` op with the initializer value.

@@ -24,7 +24,7 @@ def FastmathFlagsInterface : OpInterface<"FastmathFlagsInterface"> {

   let methods = [
     InterfaceMethod<"Get fastmath flags", "::mlir::LLVM::FastmathFlags",
-                    "fastmathFlags">,
+                    "getFastmathFlags">,
   ];
 }

@@ -220,7 +220,7 @@ def AtomicRMWOp : Std_Op<"atomic_rmw", [

   let extraClassDeclaration = [{
     MemRefType getMemRefType() {
-      return memref().getType().cast<MemRefType>();
+      return getMemref().getType().cast<MemRefType>();
     }
   }];
 }

@@ -276,7 +276,7 @@ def GenericAtomicRMWOp : Std_Op<"generic_atomic_rmw", [
       return getRegion().getArgument(0);
     }
     MemRefType getMemRefType() {
-      return memref().getType().cast<MemRefType>();
+      return getMemref().getType().cast<MemRefType>();
     }
   }];
 }

@@ -555,7 +555,7 @@ def CondBranchOp : Std_Op<"cond_br",

     /// Erase the operand at 'index' from the true operand list.
     void eraseTrueOperand(unsigned index) {
-      trueDestOperandsMutable().erase(index);
+      getTrueDestOperandsMutable().erase(index);
     }

     // Accessors for operands to the 'false' destination.

@@ -575,7 +575,7 @@ def CondBranchOp : Std_Op<"cond_br",

     /// Erase the operand at 'index' from the false operand list.
     void eraseFalseOperand(unsigned index) {
-      falseDestOperandsMutable().erase(index);
+      getFalseDestOperandsMutable().erase(index);
     }

   private:

@@ -889,13 +889,13 @@ def SwitchOp : Std_Op<"switch",
   let extraClassDeclaration = [{
     /// Return the operands for the case destination block at the given index.
     OperandRange getCaseOperands(unsigned index) {
-      return caseOperands()[index];
+      return getCaseOperands()[index];
     }

     /// Return a mutable range of operands for the case destination block at the
     /// given index.
     MutableOperandRange getCaseOperandsMutable(unsigned index) {
-      return caseOperandsMutable()[index];
+      return getCaseOperandsMutable()[index];
     }
   }];

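A note on the SwitchOp hunks above (both the LLVM and the Std variants): the zero-argument `getCaseOperands()` now called inside `extraClassDeclaration` is the ODS-generated accessor for the whole `case_operands` group, while the `unsigned`-taking overload declared there returns a single case's operands by indexing into that range. A hedged sketch of how the two overloads interact (illustrative code, not taken from the commit):

#include "mlir/Dialect/LLVMIR/LLVMDialect.h"

// Illustrative only: the generated accessor and the hand-written helper now
// share the prefixed name and differ only in arity.
static void inspectCases(mlir::LLVM::SwitchOp op) {
  // Generated accessor: the operand groups of every case at once.
  auto allCaseOperands = op.getCaseOperands();
  if (!allCaseOperands.empty()) {
    // Helper from extraClassDeclaration: the operands of one case.
    mlir::OperandRange firstCase = op.getCaseOperands(/*index=*/0);
    (void)firstCase;
  }
}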
@@ -637,7 +637,7 @@ LogicalResult CmpIOpBooleanPattern::matchAndRewrite(
 #define DISPATCH(cmpPredicate, spirvOp) \
   case cmpPredicate: \
     rewriter.replaceOpWithNewOp<spirvOp>(op, op.getResult().getType(), \
-                                         adaptor.lhs(), adaptor.rhs()); \
+                                         adaptor.getLhs(), adaptor.getRhs()); \
     return success();

     DISPATCH(arith::CmpIPredicate::eq, spirv::LogicalEqualOp);

@@ -669,7 +669,7 @@ CmpIOpPattern::matchAndRewrite(arith::CmpIOp op, OpAdaptor adaptor,
           "bitwidth emulation is not implemented yet on unsigned op"); \
     } \
     rewriter.replaceOpWithNewOp<spirvOp>(op, op.getResult().getType(), \
-                                         adaptor.lhs(), adaptor.rhs()); \
+                                         adaptor.getLhs(), adaptor.getRhs()); \
     return success();

     DISPATCH(arith::CmpIPredicate::eq, spirv::IEqualOp);

@@ -699,7 +699,7 @@ CmpFOpPattern::matchAndRewrite(arith::CmpFOp op, OpAdaptor adaptor,
 #define DISPATCH(cmpPredicate, spirvOp) \
   case cmpPredicate: \
     rewriter.replaceOpWithNewOp<spirvOp>(op, op.getResult().getType(), \
-                                         adaptor.lhs(), adaptor.rhs()); \
+                                         adaptor.getLhs(), adaptor.getRhs()); \
     return success();

     // Ordered.

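The adaptor renames in the three hunks above follow the same rule: `OpAdaptor` classes are generated from the same ODS argument names as the op itself, so their accessors pick up the `get` prefix as well, and they return the operands after type conversion rather than the original SSA values. A minimal sketch of the shape of such a pattern (assumed example for illustration, not code from this commit; the chosen dialects and result-type handling are only stand-ins):

#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/SPIRV/IR/SPIRVOps.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;

struct AddIToSPIRV : OpConversionPattern<arith::AddIOp> {
  using OpConversionPattern::OpConversionPattern;

  LogicalResult
  matchAndRewrite(arith::AddIOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    // op.getLhs()/op.getRhs() return the original operands; the adaptor's
    // getLhs()/getRhs() return the values produced by the type converter.
    Type resultType = getTypeConverter()->convertType(op.getType());
    if (!resultType)
      return failure();
    rewriter.replaceOpWithNewOp<spirv::IAddOp>(op, resultType,
                                               adaptor.getLhs(),
                                               adaptor.getRhs());
    return success();
  }
};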
@@ -43,7 +43,7 @@ struct ExpM1OpLowering : public ConvertOpToLLVMPattern<math::ExpM1Op> {
   LogicalResult
   matchAndRewrite(math::ExpM1Op op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    auto operandType = adaptor.operand().getType();
+    auto operandType = adaptor.getOperand().getType();

     if (!operandType || !LLVM::isCompatibleType(operandType))
       return failure();

@@ -62,7 +62,7 @@ struct ExpM1OpLowering : public ConvertOpToLLVMPattern<math::ExpM1Op> {
     } else {
       one = rewriter.create<LLVM::ConstantOp>(loc, operandType, floatOne);
     }
-    auto exp = rewriter.create<LLVM::ExpOp>(loc, adaptor.operand());
+    auto exp = rewriter.create<LLVM::ExpOp>(loc, adaptor.getOperand());
     rewriter.replaceOpWithNewOp<LLVM::FSubOp>(op, operandType, exp, one);
     return success();
   }

@@ -96,7 +96,7 @@ struct Log1pOpLowering : public ConvertOpToLLVMPattern<math::Log1pOp> {
   LogicalResult
   matchAndRewrite(math::Log1pOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    auto operandType = adaptor.operand().getType();
+    auto operandType = adaptor.getOperand().getType();

     if (!operandType || !LLVM::isCompatibleType(operandType))
       return rewriter.notifyMatchFailure(op, "unsupported operand type");

@@ -116,7 +116,7 @@ struct Log1pOpLowering : public ConvertOpToLLVMPattern<math::Log1pOp> {
             : rewriter.create<LLVM::ConstantOp>(loc, operandType, floatOne);

     auto add = rewriter.create<LLVM::FAddOp>(loc, operandType, one,
-                                             adaptor.operand());
+                                             adaptor.getOperand());
     rewriter.replaceOpWithNewOp<LLVM::LogOp>(op, operandType, add);
     return success();
   }

@@ -150,7 +150,7 @@ struct RsqrtOpLowering : public ConvertOpToLLVMPattern<math::RsqrtOp> {
   LogicalResult
   matchAndRewrite(math::RsqrtOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    auto operandType = adaptor.operand().getType();
+    auto operandType = adaptor.getOperand().getType();

     if (!operandType || !LLVM::isCompatibleType(operandType))
       return failure();

@@ -169,7 +169,7 @@ struct RsqrtOpLowering : public ConvertOpToLLVMPattern<math::RsqrtOp> {
     } else {
       one = rewriter.create<LLVM::ConstantOp>(loc, operandType, floatOne);
     }
-    auto sqrt = rewriter.create<LLVM::SqrtOp>(loc, adaptor.operand());
+    auto sqrt = rewriter.create<LLVM::SqrtOp>(loc, adaptor.getOperand());
     rewriter.replaceOpWithNewOp<LLVM::FDivOp>(op, operandType, one, sqrt);
     return success();
   }

@@ -45,7 +45,7 @@ public:
     assert(adaptor.getOperands().size() == 1);
     Location loc = operation.getLoc();
     auto type =
-        this->getTypeConverter()->convertType(operation.operand().getType());
+        this->getTypeConverter()->convertType(operation.getOperand().getType());
     auto one = spirv::ConstantOp::getOne(type, operation.getLoc(), rewriter);
     auto onePlus =
         rewriter.create<spirv::FAddOp>(loc, one, adaptor.getOperands()[0]);

@@ -66,7 +66,7 @@ static bool matchSimpleReduction(Block &block) {
 /// scf.reduce.return %1
 template <
     typename CompareOpTy, typename SelectOpTy,
-    typename Predicate = decltype(std::declval<CompareOpTy>().predicate())>
+    typename Predicate = decltype(std::declval<CompareOpTy>().getPredicate())>
 static bool
 matchSelectReduction(Block &block, ArrayRef<Predicate> lessThanPredicates,
                      ArrayRef<Predicate> greaterThanPredicates, bool &isMin) {

@@ -24,8 +24,8 @@ struct CeilDivUIOpConverter : public OpRewritePattern<arith::CeilDivUIOp> {
   LogicalResult matchAndRewrite(arith::CeilDivUIOp op,
                                 PatternRewriter &rewriter) const final {
     Location loc = op.getLoc();
-    Value a = op.lhs();
-    Value b = op.rhs();
+    Value a = op.getLhs();
+    Value b = op.getRhs();
     Value zero = rewriter.create<arith::ConstantOp>(
         loc, rewriter.getIntegerAttr(a.getType(), 0));
     Value compare =

@@ -1666,7 +1666,7 @@ static LogicalResult verify(GlobalOp op) {

 LogicalResult
 GlobalCtorsOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
-  for (Attribute ctor : ctors()) {
+  for (Attribute ctor : getCtors()) {
     if (failed(verifySymbolAttrUse(ctor.cast<FlatSymbolRefAttr>(), *this,
                                    symbolTable)))
       return failure();

@@ -1675,7 +1675,7 @@ GlobalCtorsOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
 }

 static LogicalResult verify(GlobalCtorsOp op) {
-  if (op.ctors().size() != op.priorities().size())
+  if (op.getCtors().size() != op.getPriorities().size())
     return op.emitError(
         "mismatch between the number of ctors and the number of priorities");
   return success();

@@ -1687,7 +1687,7 @@ static LogicalResult verify(GlobalCtorsOp op) {

 LogicalResult
 GlobalDtorsOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
-  for (Attribute dtor : dtors()) {
+  for (Attribute dtor : getDtors()) {
     if (failed(verifySymbolAttrUse(dtor.cast<FlatSymbolRefAttr>(), *this,
                                    symbolTable)))
       return failure();

@@ -1696,7 +1696,7 @@ GlobalDtorsOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
 }

 static LogicalResult verify(GlobalDtorsOp op) {
-  if (op.dtors().size() != op.priorities().size())
+  if (op.getDtors().size() != op.getPriorities().size())
     return op.emitError(
         "mismatch between the number of dtors and the number of priorities");
   return success();

@@ -41,13 +41,13 @@ LogicalResult
 PowFStrengthReduction::matchAndRewrite(math::PowFOp op,
                                        PatternRewriter &rewriter) const {
   Location loc = op.getLoc();
-  Value x = op.lhs();
+  Value x = op.getLhs();

   FloatAttr scalarExponent;
   DenseFPElementsAttr vectorExponent;

-  bool isScalar = matchPattern(op.rhs(), m_Constant(&scalarExponent));
-  bool isVector = matchPattern(op.rhs(), m_Constant(&vectorExponent));
+  bool isScalar = matchPattern(op.getRhs(), m_Constant(&scalarExponent));
+  bool isVector = matchPattern(op.getRhs(), m_Constant(&vectorExponent));

   // Returns true if exponent is a constant equal to `value`.
   auto isExponentValue = [&](double value) -> bool {

@@ -23,13 +23,13 @@ using namespace mlir;
 /// 1) 1-exp^{-2x} / 1+exp^{-2x}, if x => 0
 /// 2) exp^{2x}-1 / exp^{2x}+1 , if x < 0
 static LogicalResult convertTanhOp(math::TanhOp op, PatternRewriter &rewriter) {
-  auto floatType = op.operand().getType();
+  auto floatType = op.getOperand().getType();
   Location loc = op.getLoc();
   auto floatOne = rewriter.getFloatAttr(floatType, 1.0);
   auto floatTwo = rewriter.getFloatAttr(floatType, 2.0);
   Value one = rewriter.create<arith::ConstantOp>(loc, floatOne);
   Value two = rewriter.create<arith::ConstantOp>(loc, floatTwo);
-  Value doubledX = rewriter.create<arith::MulFOp>(loc, op.operand(), two);
+  Value doubledX = rewriter.create<arith::MulFOp>(loc, op.getOperand(), two);

   // Case 1: tanh(x) = 1-exp^{-2x} / 1+exp^{-2x}
   Value negDoubledX = rewriter.create<arith::NegFOp>(loc, doubledX);

@@ -48,7 +48,7 @@ static LogicalResult convertTanhOp(math::TanhOp op, PatternRewriter &rewriter) {
   auto floatZero = rewriter.getFloatAttr(floatType, 0.0);
   Value zero = rewriter.create<arith::ConstantOp>(loc, floatZero);
   Value cmpRes = rewriter.create<arith::CmpFOp>(loc, arith::CmpFPredicate::OGE,
-                                                op.operand(), zero);
+                                                op.getOperand(), zero);
   rewriter.replaceOpWithNewOp<SelectOp>(op, cmpRes, positiveRes, negativeRes);
   return success();
 }

@@ -295,10 +295,10 @@ public:
 LogicalResult
 TanhApproximation::matchAndRewrite(math::TanhOp op,
                                    PatternRewriter &rewriter) const {
-  if (!getElementTypeOrSelf(op.operand()).isF32())
+  if (!getElementTypeOrSelf(op.getOperand()).isF32())
     return rewriter.notifyMatchFailure(op, "unsupported operand type");

-  ArrayRef<int64_t> shape = vectorShape(op.operand());
+  ArrayRef<int64_t> shape = vectorShape(op.getOperand());

   ImplicitLocOpBuilder builder(op->getLoc(), rewriter);
   auto bcast = [&](Value value) -> Value {

@@ -308,12 +308,12 @@ TanhApproximation::matchAndRewrite(math::TanhOp op,
   // Clamp operand into [plusClamp, minusClamp] range.
   Value minusClamp = bcast(f32Cst(builder, -7.99881172180175781f));
   Value plusClamp = bcast(f32Cst(builder, 7.99881172180175781f));
-  Value x = clamp(builder, op.operand(), minusClamp, plusClamp);
+  Value x = clamp(builder, op.getOperand(), minusClamp, plusClamp);

   // Mask for tiny values that are approximated with `operand`.
   Value tiny = bcast(f32Cst(builder, 0.0004f));
   Value tinyMask = builder.create<arith::CmpFOp>(
-      arith::CmpFPredicate::OLT, builder.create<math::AbsOp>(op.operand()),
+      arith::CmpFPredicate::OLT, builder.create<math::AbsOp>(op.getOperand()),
       tiny);

   // The monomial coefficients of the numerator polynomial (odd).

@@ -383,10 +383,10 @@ template <typename Op>
 LogicalResult
 LogApproximationBase<Op>::logMatchAndRewrite(Op op, PatternRewriter &rewriter,
                                              bool base2) const {
-  if (!getElementTypeOrSelf(op.operand()).isF32())
+  if (!getElementTypeOrSelf(op.getOperand()).isF32())
     return rewriter.notifyMatchFailure(op, "unsupported operand type");

-  ArrayRef<int64_t> shape = vectorShape(op.operand());
+  ArrayRef<int64_t> shape = vectorShape(op.getOperand());

   ImplicitLocOpBuilder builder(op->getLoc(), rewriter);
   auto bcast = [&](Value value) -> Value {

@@ -415,7 +415,7 @@ LogApproximationBase<Op>::logMatchAndRewrite(Op op, PatternRewriter &rewriter,
   Value cstCephesLogP7 = bcast(f32Cst(builder, -2.4999993993E-1f));
   Value cstCephesLogP8 = bcast(f32Cst(builder, +3.3333331174E-1f));

-  Value x = op.operand();
+  Value x = op.getOperand();

   // Truncate input values to the minimum positive normal.
   x = max(builder, x, cstMinNormPos);

@@ -469,11 +469,11 @@ LogApproximationBase<Op>::logMatchAndRewrite(Op op, PatternRewriter &rewriter,
   }

   Value invalidMask = builder.create<arith::CmpFOp>(arith::CmpFPredicate::ULT,
-                                                    op.operand(), cstZero);
+                                                    op.getOperand(), cstZero);
   Value zeroMask = builder.create<arith::CmpFOp>(arith::CmpFPredicate::OEQ,
-                                                 op.operand(), cstZero);
+                                                 op.getOperand(), cstZero);
   Value posInfMask = builder.create<arith::CmpFOp>(arith::CmpFPredicate::OEQ,
-                                                   op.operand(), cstPosInf);
+                                                   op.getOperand(), cstPosInf);

   // Filter out invalid values:
   // • x == 0 -> -INF

@@ -530,10 +530,10 @@ public:
 LogicalResult
 Log1pApproximation::matchAndRewrite(math::Log1pOp op,
                                     PatternRewriter &rewriter) const {
-  if (!getElementTypeOrSelf(op.operand()).isF32())
+  if (!getElementTypeOrSelf(op.getOperand()).isF32())
     return rewriter.notifyMatchFailure(op, "unsupported operand type");

-  ArrayRef<int64_t> shape = vectorShape(op.operand());
+  ArrayRef<int64_t> shape = vectorShape(op.getOperand());

   ImplicitLocOpBuilder builder(op->getLoc(), rewriter);
   auto bcast = [&](Value value) -> Value {

@@ -547,7 +547,7 @@ Log1pApproximation::matchAndRewrite(math::Log1pOp op,
   // ^^^^^^^^^^^^^^^^^^^^^^
   // "logLarge" below.
   Value cstOne = bcast(f32Cst(builder, 1.0f));
-  Value x = op.operand();
+  Value x = op.getOperand();
   Value u = builder.create<arith::AddFOp>(x, cstOne);
   Value uSmall =
       builder.create<arith::CmpFOp>(arith::CmpFPredicate::OEQ, u, cstOne);

@@ -577,10 +577,10 @@ Log1pApproximation::matchAndRewrite(math::Log1pOp op,
 LogicalResult
 ErfPolynomialApproximation::matchAndRewrite(math::ErfOp op,
                                             PatternRewriter &rewriter) const {
-  if (!getElementTypeOrSelf(op.operand()).isF32())
+  if (!getElementTypeOrSelf(op.getOperand()).isF32())
     return rewriter.notifyMatchFailure(op, "unsupported operand type");

-  ArrayRef<int64_t> shape = vectorShape(op.operand());
+  ArrayRef<int64_t> shape = vectorShape(op.getOperand());

   ImplicitLocOpBuilder builder(op->getLoc(), rewriter);
   auto bcast = [&](Value value) -> Value {

@@ -637,9 +637,9 @@ ErfPolynomialApproximation::matchAndRewrite(math::ErfOp op,
   bounds[2] = bcast(f32Cst(builder, 3.75f));

   Value isNegativeArg = builder.create<arith::CmpFOp>(arith::CmpFPredicate::OLT,
-                                                      op.operand(), zero);
-  Value negArg = builder.create<arith::NegFOp>(op.operand());
-  Value x = builder.create<SelectOp>(isNegativeArg, negArg, op.operand());
+                                                      op.getOperand(), zero);
+  Value negArg = builder.create<arith::NegFOp>(op.getOperand());
+  Value x = builder.create<SelectOp>(isNegativeArg, negArg, op.getOperand());

   Value offset = offsets[0];
   Value p[polyDegree + 1];

@@ -701,10 +701,10 @@ public:
 LogicalResult
 ExpApproximation::matchAndRewrite(math::ExpOp op,
                                   PatternRewriter &rewriter) const {
-  if (!getElementTypeOrSelf(op.operand()).isF32())
+  if (!getElementTypeOrSelf(op.getOperand()).isF32())
     return rewriter.notifyMatchFailure(op, "unsupported operand type");

-  ArrayRef<int64_t> shape = vectorShape(op.operand());
+  ArrayRef<int64_t> shape = vectorShape(op.getOperand());

   ImplicitLocOpBuilder builder(op->getLoc(), rewriter);

@@ -735,7 +735,7 @@ ExpApproximation::matchAndRewrite(math::ExpOp op,
   Value cstCephesExpP4 = bcast(f32Cst(builder, 0.03668965196652099192f));
   Value cstCephesExpP5 = bcast(f32Cst(builder, 0.01314350012789660196f));

-  Value x = op.operand();
+  Value x = op.getOperand();

   // Reduced y = x - floor(x / ln(2)) * ln(2) = x - k * ln(2)
   Value xL2Inv = mul(x, cstLog2E);

@@ -823,10 +823,10 @@ public:
 LogicalResult
 ExpM1Approximation::matchAndRewrite(math::ExpM1Op op,
                                     PatternRewriter &rewriter) const {
-  if (!getElementTypeOrSelf(op.operand()).isF32())
+  if (!getElementTypeOrSelf(op.getOperand()).isF32())
     return rewriter.notifyMatchFailure(op, "unsupported operand type");

-  ArrayRef<int64_t> shape = vectorShape(op.operand());
+  ArrayRef<int64_t> shape = vectorShape(op.getOperand());

   ImplicitLocOpBuilder builder(op->getLoc(), rewriter);
   auto bcast = [&](Value value) -> Value {

@@ -838,7 +838,7 @@ ExpM1Approximation::matchAndRewrite(math::ExpM1Op op,
   // and when the input is ~= -inf, i.e. u - 1 ~= -1.
   Value cstOne = bcast(f32Cst(builder, 1.0f));
   Value cstNegOne = bcast(f32Cst(builder, -1.0f));
-  Value x = op.operand();
+  Value x = op.getOperand();
   Value u = builder.create<math::ExpOp>(x);
   Value uEqOne =
       builder.create<arith::CmpFOp>(arith::CmpFPredicate::OEQ, u, cstOne);

@@ -892,10 +892,10 @@ LogicalResult SinAndCosApproximation<isSine, OpTy>::matchAndRewrite(
       llvm::is_one_of<OpTy, math::SinOp, math::CosOp>::value,
       "SinAndCosApproximation pattern expects math::SinOp or math::CosOp");

-  if (!getElementTypeOrSelf(op.operand()).isF32())
+  if (!getElementTypeOrSelf(op.getOperand()).isF32())
     return rewriter.notifyMatchFailure(op, "unsupported operand type");

-  ArrayRef<int64_t> shape = vectorShape(op.operand());
+  ArrayRef<int64_t> shape = vectorShape(op.getOperand());

   ImplicitLocOpBuilder builder(op->getLoc(), rewriter);
   auto bcast = [&](Value value) -> Value {

@@ -941,7 +941,7 @@ LogicalResult SinAndCosApproximation<isSine, OpTy>::matchAndRewrite(
   Value twoOverPi = bcast(f32Cst(builder, TWO_OVER_PI));
   Value piOverTwo = bcast(f32Cst(builder, PI_OVER_2));

-  Value x = op.operand();
+  Value x = op.getOperand();

   Value k = floor(mul(x, twoOverPi));

@@ -1015,10 +1015,10 @@ struct RsqrtApproximation : public OpRewritePattern<math::RsqrtOp> {
 LogicalResult
 RsqrtApproximation::matchAndRewrite(math::RsqrtOp op,
                                     PatternRewriter &rewriter) const {
-  if (!getElementTypeOrSelf(op.operand()).isF32())
+  if (!getElementTypeOrSelf(op.getOperand()).isF32())
     return rewriter.notifyMatchFailure(op, "unsupported operand type");

-  ArrayRef<int64_t> shape = vectorShape(op.operand());
+  ArrayRef<int64_t> shape = vectorShape(op.getOperand());

   // Only support already-vectorized rsqrt's.
   if (shape.empty() || shape.back() % 8 != 0)

@@ -1034,14 +1034,14 @@ RsqrtApproximation::matchAndRewrite(math::RsqrtOp op,
   Value cstNegHalf = bcast(f32Cst(builder, -0.5f));
   Value cstMinNormPos = bcast(f32FromBits(builder, 0x00800000u));

-  Value negHalf = builder.create<arith::MulFOp>(op.operand(), cstNegHalf);
+  Value negHalf = builder.create<arith::MulFOp>(op.getOperand(), cstNegHalf);

   // Select only the inverse sqrt of positive normals (denormals are
   // flushed to zero).
-  Value ltMinMask = builder.create<arith::CmpFOp>(arith::CmpFPredicate::OLT,
-                                                  op.operand(), cstMinNormPos);
+  Value ltMinMask = builder.create<arith::CmpFOp>(
+      arith::CmpFPredicate::OLT, op.getOperand(), cstMinNormPos);
   Value infMask = builder.create<arith::CmpFOp>(arith::CmpFPredicate::OEQ,
-                                                op.operand(), cstPosInf);
+                                                op.getOperand(), cstPosInf);
   Value notNormalFiniteMask = builder.create<arith::OrIOp>(ltMinMask, infMask);

   // Compute an approximate result.

@@ -161,7 +161,7 @@ static llvm::FastMathFlags getFastmathFlags(FastmathFlagsInterface &op) {
       // clang-format on
   };
   llvm::FastMathFlags ret;
-  auto fmf = op.fastmathFlags();
+  auto fmf = op.getFastmathFlags();
   for (auto it : handlers)
     if (bitEnumContains(fmf, it.first))
       (ret.*(it.second))(true);

@@ -610,8 +610,8 @@ LogicalResult ModuleTranslation::convertGlobals() {
     auto dtorOp = dyn_cast<GlobalDtorsOp>(op);
     if (!ctorOp && !dtorOp)
       continue;
-    auto range = ctorOp ? llvm::zip(ctorOp.ctors(), ctorOp.priorities())
-                        : llvm::zip(dtorOp.dtors(), dtorOp.priorities());
+    auto range = ctorOp ? llvm::zip(ctorOp.getCtors(), ctorOp.getPriorities())
+                        : llvm::zip(dtorOp.getDtors(), dtorOp.getPriorities());
     auto appendGlobalFn =
         ctorOp ? llvm::appendToGlobalCtors : llvm::appendToGlobalDtors;
     for (auto symbolAndPriority : range) {

@@ -121,6 +121,7 @@ static bool emitOneBuilder(const Record &record, raw_ostream &os) {
   llvm::raw_string_ostream bs(builder);
   while (auto loc = findNextVariable(builderStrRef)) {
     auto name = loc.in(builderStrRef).drop_front();
+    auto getterName = op.getGetterName(name);
     // First, insert the non-matched part as is.
     bs << builderStrRef.substr(0, loc.pos);
     // Then, rewrite the name based on its kind.

@@ -128,13 +129,13 @@ static bool emitOneBuilder(const Record &record, raw_ostream &os) {
     if (isOperandName(op, name)) {
       auto result =
           isVariadicOperand
-              ? formatv("moduleTranslation.lookupValues(op.{0}())", name)
-              : formatv("moduleTranslation.lookupValue(op.{0}())", name);
+              ? formatv("moduleTranslation.lookupValues(op.{0}())", getterName)
+              : formatv("moduleTranslation.lookupValue(op.{0}())", getterName);
       bs << result;
     } else if (isAttributeName(op, name)) {
-      bs << formatv("op.{0}()", name);
+      bs << formatv("op.{0}()", getterName);
     } else if (isResultName(op, name)) {
-      bs << formatv("moduleTranslation.mapValue(op.{0}())", name);
+      bs << formatv("moduleTranslation.mapValue(op.{0}())", getterName);
     } else if (name == "_resultType") {
       bs << "moduleTranslation.convertType(op.getResult().getType())";
     } else if (name == "_hasResult") {

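The mlir-tblgen change above is what keeps the LLVM IR translation in sync with the renamed accessors: `$name` placeholders in an op's `llvmBuilder` string are now resolved through `op.getGetterName(name)` before being spliced into the generated C++. As a rough, self-contained sketch of the name mapping this implies (an approximation written for illustration, not the actual tblgen implementation):

#include <cctype>
#include <string>

// Approximate the prefixed-getter naming seen throughout this commit:
// "value" -> "getValue", "case_operands" -> "getCaseOperands",
// "fastmathFlags" -> "getFastmathFlags".
static std::string prefixedGetterName(const std::string &argName) {
  std::string result = "get";
  bool upperNext = true;
  for (char c : argName) {
    if (c == '_') { // snake_case separators start a new word
      upperNext = true;
      continue;
    }
    result += upperNext
                  ? static_cast<char>(std::toupper(static_cast<unsigned char>(c)))
                  : c;
    upperNext = false;
  }
  return result;
}

// With that mapping, the `llvmBuilder` string of llvm.resume shown earlier,
//   builder.CreateResume($value);
// is emitted as roughly:
//   builder.CreateResume(moduleTranslation.lookupValue(op.getValue()));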