[mlir] Don't use Optional::hasValue (NFC)

Author: Kazu Hirata
Date:   2022-06-20 11:22:37 -07:00
Parent: c0ecbfa4fd
Commit: 037f09959a
55 changed files with 91 additions and 91 deletions
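
For readers of the raw diff, the change is purely mechanical: `llvm::Optional` (which mirrors `std::optional`) converts contextually to `bool`, so `x.hasValue()` inside an `if` condition becomes just `x`, and `x.getValue()` becomes `*x`. Where an actual `bool` value is needed rather than a boolean context (the XOR and inequality checks later in the diff), the `std::optional`-style spelling `has_value()` is used instead. The sketch below is illustrative only and not part of the patch; the function and variable names (`tripCountOrDefault`, `maybeTripCount`) are made up.

#include "llvm/ADT/Optional.h"

// Illustrative only; not code from the patch. Shows the pattern the commit
// applies: contextual bool conversion instead of hasValue(), operator*
// instead of getValue().
static int64_t tripCountOrDefault(llvm::Optional<int64_t> maybeTripCount) {
  // Before: if (maybeTripCount.hasValue()) return maybeTripCount.getValue();
  if (maybeTripCount)        // contextual conversion to bool
    return *maybeTripCount;  // dereference instead of getValue()
  return -1;                 // hypothetical fallback value
}

// When an explicit `bool` value is needed (not just a boolean context), the
// commit keeps an explicit call but spells it has_value(), e.g.:
//   if (getParameter().has_value() ^ (getNumOperands() != 1)) ...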


@ -64,7 +64,7 @@ class MinVersionBase<string name, I32EnumAttr scheme, I32EnumAttrCase min>
let queryFnName = "getMinVersion";
let mergeAction = "{ "
"if ($overall.hasValue()) { "
"if ($overall) { "
"$overall = static_cast<" # scheme.returnType # ">("
"std::max(*$overall, $instance)); "
"} else { $overall = $instance; }}";
@ -83,7 +83,7 @@ class MaxVersionBase<string name, I32EnumAttr scheme, I32EnumAttrCase max>
let queryFnName = "getMaxVersion";
let mergeAction = "{ "
"if ($overall.hasValue()) { "
"if ($overall) { "
"$overall = static_cast<" # scheme.returnType # ">("
"std::min(*$overall, $instance)); "
"} else { $overall = $instance; }}";


@ -310,7 +310,7 @@ struct ComposeExpandOfCollapseOp : public OpRewritePattern<ExpandOpTy> {
auto composedReassociation = findCollapsingReassociation(
srcReassociation, resultReassociation, srcType.getShape(),
resultType.getShape());
if (!composedReassociation.hasValue())
if (!composedReassociation)
return failure();
rewriter.replaceOpWithNewOp<CollapseOpTy>(
@ -320,7 +320,7 @@ struct ComposeExpandOfCollapseOp : public OpRewritePattern<ExpandOpTy> {
auto composedReassociation =
findCollapsingReassociation(resultReassociation, srcReassociation,
resultType.getShape(), srcType.getShape());
if (!composedReassociation.hasValue())
if (!composedReassociation)
return failure();
rewriter.replaceOpWithNewOp<ExpandOpTy>(
@ -357,7 +357,7 @@ private:
// Find reassociation to collapse `srcSubShape` into `resultSubShape`.
auto subShapeReassociation =
getReassociationIndicesForCollapse(srcSubShape, resultSubShape);
if (!subShapeReassociation.hasValue())
if (!subShapeReassociation)
return llvm::None;
// Remap the subshape indices back to the original srcShape.


@ -109,7 +109,7 @@ static APInt getLoopBoundFromFold(Optional<OpFoldResult> loopBound,
detail::IntRangeAnalysisImpl &analysis,
bool getUpper) {
unsigned int width = ConstantIntRanges::getStorageBitwidth(boundType);
if (loopBound.hasValue()) {
if (loopBound) {
if (loopBound->is<Attribute>()) {
if (auto bound =
loopBound->get<Attribute>().dyn_cast_or_null<IntegerAttr>())
@ -290,7 +290,7 @@ ChangeResult detail::IntRangeAnalysisImpl::visitNonControlFlowArguments(
// Infer bounds for loop arguments that have static bounds
if (auto loop = dyn_cast<LoopLikeOpInterface>(op)) {
Optional<Value> iv = loop.getSingleInductionVar();
if (!iv.hasValue()) {
if (!iv) {
return ForwardDataFlowAnalysis<
IntRangeLattice>::visitNonControlFlowArguments(op, region, operands);
}


@ -1423,7 +1423,7 @@ Optional<int64_t> IntegerRelation::getConstantBoundOnDimSize(
}
}
}
if (lb && minDiff.hasValue()) {
if (lb && minDiff) {
// Set lb to the symbolic lower bound.
lb->resize(getNumSymbolIds() + 1);
if (ub)


@ -191,7 +191,7 @@ struct RawBufferOpLowering : public ConvertOpToLLVMPattern<GpuOp> {
voffset =
voffset ? rewriter.create<LLVM::AddOp>(loc, voffset, index) : index;
}
if (adaptor.getIndexOffset().hasValue()) {
if (adaptor.getIndexOffset()) {
int32_t indexOffset = *gpuOp.getIndexOffset() * elementByteWidth;
Value extraOffsetConst = createI32Constant(rewriter, loc, indexOffset);
voffset =


@ -897,7 +897,7 @@ public:
LogicalResult
matchAndRewrite(SPIRVOp op, typename SPIRVOp::Adaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
if (!op.memory_access().hasValue()) {
if (!op.memory_access()) {
return replaceWithLoadOrStore(op, adaptor.getOperands(), rewriter,
this->typeConverter, /*alignment=*/0,
/*isVolatile=*/false,


@ -359,7 +359,7 @@ static bool LLVM_ATTRIBUTE_UNUSED areIdsUnique(
ArrayRef<Optional<Value>> maybeValues =
cst.getMaybeValues().slice(start, end - start);
for (Optional<Value> val : maybeValues) {
if (val.hasValue() && !uniqueIds.insert(val.getValue()).second)
if (val && !uniqueIds.insert(*val).second)
return false;
}
return true;
@ -831,7 +831,7 @@ static bool detectAsMod(const FlatAffineValueConstraints &cst, unsigned pos,
dimExpr.getPosition());
// If `id_n` has an upperbound that is less than the divisor, mod can be
// eliminated altogether.
if (ub.hasValue() && ub.getValue() < divisor)
if (ub && *ub < divisor)
memo[pos] = dimExpr;
else
memo[pos] = dimExpr % divisor;
@ -1330,7 +1330,7 @@ LogicalResult FlatAffineValueConstraints::addSliceBounds(
bool FlatAffineValueConstraints::findId(Value val, unsigned *pos) const {
unsigned i = 0;
for (const auto &mayBeId : values) {
if (mayBeId.hasValue() && mayBeId.getValue() == val) {
if (mayBeId && *mayBeId == val) {
*pos = i;
return true;
}


@ -230,7 +230,7 @@ Optional<bool> ComputationSliceState::isSliceValid() {
// TODO: Store the result of the fast check, as it might be used again in
// `canRemoveSrcNodeAfterFusion`.
Optional<bool> isValidFastCheck = isSliceMaximalFastCheck();
if (isValidFastCheck.hasValue() && isValidFastCheck.getValue())
if (isValidFastCheck && *isValidFastCheck)
return true;
// Create constraints for the source loop nest using which slice is computed.
@ -292,7 +292,7 @@ Optional<bool> ComputationSliceState::isMaximal() const {
// Fast check to determine if the computation slice is maximal. If the result
// is inconclusive, we proceed with a more expensive analysis.
Optional<bool> isMaximalFastCheck = isSliceMaximalFastCheck();
if (isMaximalFastCheck.hasValue())
if (isMaximalFastCheck)
return isMaximalFastCheck;
// Create constraints for the src loop nest being sliced.
@ -630,7 +630,7 @@ Optional<int64_t> MemRefRegion::getRegionSize() {
// Compute the extents of the buffer.
Optional<int64_t> numElements = getConstantBoundingSizeAndShape();
if (!numElements.hasValue()) {
if (!numElements) {
LLVM_DEBUG(llvm::dbgs() << "Dynamic shapes not yet supported\n");
return None;
}
@ -960,7 +960,7 @@ mlir::computeSliceUnion(ArrayRef<Operation *> opsA, ArrayRef<Operation *> opsB,
// Check if the slice computed is valid. Return success only if it is verified
// that the slice is valid, otherwise return appropriate failure status.
Optional<bool> isSliceValid = sliceUnion->isSliceValid();
if (!isSliceValid.hasValue()) {
if (!isSliceValid) {
LLVM_DEBUG(llvm::dbgs() << "Cannot determine if the slice is valid\n");
return SliceComputationResult::GenericFailure;
}


@ -1684,7 +1684,7 @@ struct AffineForEmptyLoopFolder : public OpRewritePattern<AffineForOp> {
if (forOp.getNumResults() == 0)
return success();
Optional<uint64_t> tripCount = getTrivialConstantTripCount(forOp);
if (tripCount.hasValue() && tripCount.getValue() == 0) {
if (tripCount && *tripCount == 0) {
// The initial values of the iteration arguments would be the op's
// results.
rewriter.replaceOp(forOp, forOp.getIterOperands());
@ -1771,7 +1771,7 @@ void AffineForOp::getSuccessorRegions(
// From the loop body, if the trip count is one, we can only branch back to
// the parent.
if (index.hasValue() && tripCount.hasValue() && tripCount.getValue() == 1) {
if (index && tripCount && *tripCount == 1) {
regions.push_back(RegionSuccessor(getResults()));
return;
}


@ -633,7 +633,7 @@ static bool canRemoveSrcNodeAfterFusion(
// that all the dependences are preserved.
if (hasOutDepsAfterFusion || !escapingMemRefs.empty()) {
Optional<bool> isMaximal = fusionSlice.isMaximal();
if (!isMaximal.hasValue()) {
if (!isMaximal) {
LLVM_DEBUG(llvm::dbgs() << "Src loop can't be removed: can't determine "
"if fusion is maximal\n");
return false;
@ -1234,7 +1234,7 @@ static bool isFusionProfitable(Operation *srcOpInst, Operation *srcStoreOpInst,
// A simple cost model: fuse if it reduces the memory footprint.
if (!bestDstLoopDepth.hasValue()) {
if (!bestDstLoopDepth) {
LLVM_DEBUG(
llvm::dbgs()
<< "All fusion choices involve more than the threshold amount of "
@ -1242,7 +1242,7 @@ static bool isFusionProfitable(Operation *srcOpInst, Operation *srcStoreOpInst,
return false;
}
if (!bestDstLoopDepth.hasValue()) {
if (!bestDstLoopDepth) {
LLVM_DEBUG(llvm::dbgs() << "no fusion depth could be evaluated.\n");
return false;
}
@ -1263,7 +1263,7 @@ static bool isFusionProfitable(Operation *srcOpInst, Operation *srcStoreOpInst,
Optional<double> storageReduction = None;
if (!dstMemSize.hasValue() || !srcMemSize.hasValue()) {
if (!dstMemSize || !srcMemSize) {
LLVM_DEBUG(llvm::dbgs()
<< " fusion memory benefit cannot be evaluated; NOT fusing.\n");
return false;


@ -93,7 +93,7 @@ void LoopUnroll::runOnOperation() {
// an outer one may delete gathered inner ones).
getOperation().walk([&](AffineForOp forOp) {
Optional<uint64_t> tripCount = getConstantTripCount(forOp);
if (tripCount.hasValue() && tripCount.getValue() <= unrollFullThreshold)
if (tripCount && *tripCount <= unrollFullThreshold)
loops.push_back(forOp);
});
for (auto forOp : loops)


@ -234,7 +234,7 @@ static void findMatchingStartFinishInsts(
/// inserted right before where it was.
void PipelineDataTransfer::runOnAffineForOp(AffineForOp forOp) {
auto mayBeConstTripCount = getConstantTripCount(forOp);
if (!mayBeConstTripCount.hasValue()) {
if (!mayBeConstTripCount) {
LLVM_DEBUG(forOp.emitRemark("won't pipeline due to unknown trip count"));
return;
}


@ -1663,7 +1663,7 @@ static void vectorizeLoops(Operation *parentOp, DenseSet<Operation *> &loops,
// Compute 1-D, 2-D or 3-D loop pattern to be matched on the target loops.
Optional<NestedPattern> pattern =
makePattern(loops, vectorSizes.size(), fastestVaryingPattern);
if (!pattern.hasValue()) {
if (!pattern) {
LLVM_DEBUG(dbgs() << "\n[early-vect] pattern couldn't be computed\n");
return;
}


@ -503,7 +503,7 @@ bool mlir::getLoopNestStats(AffineForOp forOpRoot, LoopNestStats *stats) {
// Record trip count for 'forOp'. Set flag if trip count is not
// constant.
Optional<uint64_t> maybeConstTripCount = getConstantTripCount(forOp);
if (!maybeConstTripCount.hasValue()) {
if (!maybeConstTripCount) {
// Currently only constant trip count loop nests are supported.
LLVM_DEBUG(llvm::dbgs() << "Non-constant trip count unsupported\n");
return WalkResult::interrupt();


@ -246,7 +246,7 @@ LogicalResult mlir::affineForOpBodySkew(AffineForOp forOp,
// better way to pipeline for such loops is to first tile them and extract
// constant trip count "full tiles" before applying this.
auto mayBeConstTripCount = getConstantTripCount(forOp);
if (!mayBeConstTripCount.hasValue()) {
if (!mayBeConstTripCount) {
LLVM_DEBUG(forOp.emitRemark("non-constant trip count loop not handled"));
return success();
}
@ -2094,7 +2094,7 @@ static LogicalResult generateCopy(
lbs.reserve(rank);
Optional<int64_t> numElements = region.getConstantBoundingSizeAndShape(
&fastBufferShape, &lbs, &lbDivisors);
if (!numElements.hasValue()) {
if (!numElements) {
LLVM_DEBUG(llvm::dbgs() << "Non-constant region size not supported\n");
return failure();
}


@ -31,7 +31,7 @@ static ConstantIntRanges computeBoundsBy(ConstArithFn op, const APInt &minLeft,
const APInt &maxRight, bool isSigned) {
Optional<APInt> maybeMin = op(minLeft, minRight);
Optional<APInt> maybeMax = op(maxLeft, maxRight);
if (maybeMin.hasValue() && maybeMax.hasValue())
if (maybeMin && maybeMax)
return ConstantIntRanges::range(*maybeMin, *maybeMax, isSigned);
return ConstantIntRanges::maxRange(minLeft.getBitWidth());
}


@ -77,7 +77,7 @@ void ExecuteOp::getSuccessorRegions(Optional<unsigned> index,
ArrayRef<Attribute>,
SmallVectorImpl<RegionSuccessor> &regions) {
// The `body` region branch back to the parent operation.
if (index.hasValue()) {
if (index) {
assert(*index == 0 && "invalid region index");
regions.push_back(RegionSuccessor(results()));
return;


@ -185,7 +185,7 @@ LogicalResult AllocTensorOp::bufferize(RewriterBase &rewriter,
// Should the buffer be deallocated?
AnalysisState analysisState(options);
bool dealloc;
if (getEscape().hasValue()) {
if (getEscape()) {
dealloc = !*getEscape();
} else {
// No "escape" annotation found.


@ -80,7 +80,7 @@ void BufferPlacementAllocs::build(Operation *op) {
// Find the associated dealloc value and register the allocation entry.
llvm::Optional<Operation *> dealloc = memref::findDealloc(allocValue);
// If the allocation has > 1 dealloc associated with it, skip handling it.
if (!dealloc.hasValue())
if (!dealloc)
return;
allocs.push_back(std::make_tuple(allocValue, *dealloc));
});


@ -240,7 +240,7 @@ struct CallOpInterface
const FuncAnalysisState &funcState = getFuncAnalysisState(state);
Optional<int64_t> maybeEquiv =
getEquivalentFuncArgIdx(funcOp, funcState, opResult.getResultNumber());
if (maybeEquiv.hasValue()) {
if (maybeEquiv) {
#ifndef NDEBUG
SmallVector<OpOperand *> aliasingOpOperands =
getAliasingOpOperand(op, opResult, state);


@ -86,7 +86,7 @@ struct TensorCopyInsertionPass
}
void runOnOperation() override {
if (options.hasValue()) {
if (options) {
if (failed(insertTensorCopies(getOperation(), *options)))
signalPassFailure();
} else {


@ -310,7 +310,7 @@ static void printAsyncDependencies(OpAsmPrinter &printer, Operation *op,
//===----------------------------------------------------------------------===//
LogicalResult gpu::AllReduceOp::verifyRegions() {
if (body().empty() != op().hasValue())
if (body().empty() != op().has_value())
return emitError("expected either an op attribute or a non-empty body");
if (!body().empty()) {
if (body().getNumArguments() != 2)


@ -215,7 +215,7 @@ void AllocaOp::print(OpAsmPrinter &p) {
FunctionType::get(getContext(), {getArraySize().getType()}, {getType()});
p << ' ' << getArraySize() << " x " << elemTy;
if (getAlignment().hasValue() && *getAlignment() != 0)
if (getAlignment() && *getAlignment() != 0)
p.printOptionalAttrDict((*this)->getAttrs(), {kElemTypeAttrName});
else
p.printOptionalAttrDict((*this)->getAttrs(),
@ -1040,7 +1040,7 @@ ParseResult InvokeOp::parse(OpAsmParser &parser, OperationState &result) {
LogicalResult LandingpadOp::verify() {
Value value;
if (LLVMFuncOp func = (*this)->getParentOfType<LLVMFuncOp>()) {
if (!func.getPersonality().hasValue())
if (!func.getPersonality())
return emitError(
"llvm.landingpad needs to be in a function with a personality");
}
@ -2748,7 +2748,7 @@ LogicalResult LLVMDialect::verifyOperationAttribute(Operation *op,
<< "' to be a dictionary attribute";
Optional<NamedAttribute> parallelAccessGroup =
loopAttr.getNamed(LLVMDialect::getParallelAccessAttrName());
if (parallelAccessGroup.hasValue()) {
if (parallelAccessGroup) {
auto accessGroups = parallelAccessGroup->getValue().dyn_cast<ArrayAttr>();
if (!accessGroups)
return op->emitOpError()
@ -3010,7 +3010,7 @@ LoopOptionsAttrBuilder &LoopOptionsAttrBuilder::setOption(LoopOptionCase tag,
auto option = llvm::find_if(
options, [tag](auto option) { return option.first == tag; });
if (option != options.end()) {
if (value.hasValue())
if (value)
option->second = *value;
else
options.erase(option);


@ -203,7 +203,7 @@ void MmaOp::build(OpBuilder &builder, OperationState &result, Type resultType,
result.addOperands(operandB);
result.addOperands(operandC);
if (multiplicandPtxTypes.hasValue()) {
if (multiplicandPtxTypes) {
result.addAttribute("multiplicandAPtxType",
MMATypesAttr::get(ctx, (*multiplicandPtxTypes)[0]));
result.addAttribute("multiplicandBPtxType",
@ -215,7 +215,7 @@ void MmaOp::build(OpBuilder &builder, OperationState &result, Type resultType,
result.addAttribute("multiplicandBPtxType", MMATypesAttr::get(ctx, *res));
}
if (multiplicandLayouts.hasValue()) {
if (multiplicandLayouts) {
result.addAttribute("layoutA",
MMALayoutAttr::get(ctx, (*multiplicandLayouts)[0]));
result.addAttribute("layoutB",
@ -506,7 +506,7 @@ LogicalResult MmaOp::verify() {
}
// Ensure that binary MMA variants have a b1 MMA operation defined.
if (getMultiplicandAPtxType() == MMATypes::b1 && !getB1Op().hasValue()) {
if (getMultiplicandAPtxType() == MMATypes::b1 && !getB1Op()) {
return emitOpError("op requires " + getB1OpAttrName().strref() +
" attribute");
}
@ -515,7 +515,7 @@ LogicalResult MmaOp::verify() {
// attribute.
if (isInt4PtxType(*getMultiplicandAPtxType()) ||
isInt8PtxType(*getMultiplicandAPtxType())) {
if (!getIntOverflowBehavior().hasValue())
if (!getIntOverflowBehavior())
return emitOpError("op requires " +
getIntOverflowBehaviorAttrName().strref() +
" attribute");


@ -101,7 +101,7 @@ static void buildStructuredOp(OpBuilder &b, OperationState &state,
// Derive the result types if needed.
SmallVector<Type> derivedResultTypes =
resultTensorTypes.value_or(TypeRange());
if (!resultTensorTypes.hasValue())
if (!resultTensorTypes)
copy_if(outputs.getTypes(), std::back_inserter(derivedResultTypes),
[](Type type) { return type.isa<RankedTensorType>(); });


@ -372,7 +372,7 @@ mlir::linalg::promoteSubviewsPrecondition(Operation *op,
auto sv =
isa_and_nonnull<memref::SubViewOp>(opOperand->get().getDefiningOp());
if (sv) {
if (!options.operandsToPromote.hasValue() ||
if (!options.operandsToPromote ||
options.operandsToPromote->count(opOperand->getOperandNumber()))
return success();
}


@ -254,7 +254,7 @@ void getUpperBoundForIndex(Value value, AffineMap &boundMap,
if (constantRequired) {
auto ubConst = constraints.getConstantBound(
FlatAffineValueConstraints::BoundType::UB, pos);
if (!ubConst.hasValue())
if (!ubConst)
return;
boundMap =
@ -474,7 +474,7 @@ void GenerateLoopNest<scf::ForOp>::doit(
// Create procInfo so it dominates loops, if appropriate.
SmallVector<ProcInfo, 4> procInfo;
SmallVector<DistributionMethod, 0> distributionMethod;
if (distributionOptions.hasValue()) {
if (distributionOptions) {
// Collect loop ranges for parallel dimensions.
SmallVector<Range, 2> parallelLoopRanges;
for (const auto &iteratorType : enumerate(iteratorTypes))


@ -290,7 +290,7 @@ ParseResult AllocaScopeOp::parse(OpAsmParser &parser, OperationState &result) {
void AllocaScopeOp::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
if (index.hasValue()) {
if (index) {
regions.push_back(RegionSuccessor(getResults()));
return;
}
@ -756,7 +756,7 @@ Optional<int64_t> DimOp::getConstantIndex() {
LogicalResult DimOp::verify() {
// Assume unknown index to be in range.
Optional<int64_t> index = getConstantIndex();
if (!index.hasValue())
if (!index)
return success();
// Check that constant index is not knowingly out of range.
@ -2323,7 +2323,7 @@ isRankReducedMemRefType(MemRefType originalType,
originalType, candidateRankReducedType, sizes);
// Sizes cannot be matched in case empty vector is returned.
if (!optionalUnusedDimsMask.hasValue())
if (!optionalUnusedDimsMask)
return SliceVerificationResult::LayoutMismatch;
if (originalType.getMemorySpace() !=


@ -184,7 +184,7 @@ verifyScheduleModifiers(OpAsmParser &parser,
// Translate the string. If it has no value, then it was not a valid
// modifier!
auto symbol = symbolizeScheduleModifier(mod);
if (!symbol.hasValue())
if (!symbol)
return parser.emitError(parser.getNameLoc())
<< " unknown modifier type: " << mod;
}


@ -248,7 +248,7 @@ void ExecuteRegionOp::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
// If the predecessor is the ExecuteRegionOp, branch into the body.
if (!index.hasValue()) {
if (!index) {
regions.push_back(RegionSuccessor(&getRegion()));
return;
}
@ -491,7 +491,7 @@ void ForOp::getSuccessorRegions(Optional<unsigned> index,
SmallVectorImpl<RegionSuccessor> &regions) {
// If the predecessor is the ForOp, branch into the body using the iterator
// arguments.
if (!index.hasValue()) {
if (!index) {
regions.push_back(RegionSuccessor(&getLoopBody(), getRegionIterArgs()));
return;
}
@ -1475,7 +1475,7 @@ void IfOp::getSuccessorRegions(Optional<unsigned> index,
ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
// The `then` and the `else` region branch back to the parent operation.
if (index.hasValue()) {
if (index) {
regions.push_back(RegionSuccessor(getResults()));
return;
}
@ -2632,7 +2632,7 @@ void WhileOp::getSuccessorRegions(Optional<unsigned> index,
ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
// The parent op always branches to the condition region.
if (!index.hasValue()) {
if (!index) {
regions.emplace_back(&getBefore(), getBefore().getArguments());
return;
}


@ -870,7 +870,7 @@ static ParseResult parseGroupNonUniformArithmeticOp(OpAsmParser &parser,
if (parser.resolveOperand(valueInfo, resultType, state.operands))
return failure();
if (clusterSizeInfo.hasValue()) {
if (clusterSizeInfo) {
Type i32Type = parser.getBuilder().getIntegerType(32);
if (parser.resolveOperand(*clusterSizeInfo, i32Type, state.operands))
return failure();


@ -367,7 +367,7 @@ void AssumingOp::getSuccessorRegions(
// AssumingOp has unconditional control flow into the region and back to the
// parent, so return the correct RegionSuccessor purely based on the index
// being None or 0.
if (index.hasValue()) {
if (index) {
regions.push_back(RegionSuccessor(getResults()));
return;
}


@ -411,7 +411,7 @@ public:
if (!enc)
return failure();
Optional<int64_t> index = op.getConstantIndex();
if (!index.hasValue())
if (!index)
return failure();
// Generate the call.
Value src = adaptor.getOperands()[0];


@ -303,7 +303,7 @@ Optional<int64_t> DimOp::getConstantIndex() {
LogicalResult DimOp::verify() {
// Assume unknown index to be in range.
Optional<int64_t> index = getConstantIndex();
if (!index.hasValue())
if (!index)
return success();
// Check that constant index is not knowingly out of range.


@ -130,7 +130,7 @@ public:
loc, weightTy, reverse1, rewriter.getI64IntegerAttr(2));
Value conv2d;
if (op.quantization_info().hasValue()) {
if (op.quantization_info()) {
conv2d = rewriter.create<tosa::Conv2DOp>(
loc, resultTy, input, reverse2, bias,
rewriter.getI64ArrayAttr(convPad), rewriter.getI64ArrayAttr(stride),
@ -297,7 +297,7 @@ public:
// Perform the convolution using the zero bias.
Value conv2d;
if (op.quantization_info().hasValue()) {
if (op.quantization_info()) {
conv2d = createOpAndInfer<tosa::Conv2DOp>(
rewriter, loc, UnrankedTensorType::get(resultETy), input,
weight, zeroBias,


@ -124,7 +124,7 @@ LogicalResult PatternApplicatorExtension::findAllMatches(
OperandRange
transform::AlternativesOp::getSuccessorEntryOperands(Optional<unsigned> index) {
if (index.hasValue() && getOperation()->getNumOperands() == 1)
if (index && getOperation()->getNumOperands() == 1)
return getOperation()->getOperands();
return OperandRange(getOperation()->operand_end(),
getOperation()->operand_end());
@ -471,7 +471,7 @@ transform::SequenceOp::getSuccessorEntryOperands(Optional<unsigned> index) {
void transform::SequenceOp::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
if (!index.hasValue()) {
if (!index) {
Region *bodyRegion = &getBody();
regions.emplace_back(bodyRegion, !operands.empty()
? bodyRegion->getArguments()


@ -4759,7 +4759,7 @@ ParseResult WarpExecuteOnLane0Op::parse(OpAsmParser &parser,
void WarpExecuteOnLane0Op::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
if (index.hasValue()) {
if (index) {
regions.push_back(RegionSuccessor(getResults()));
return;
}


@ -169,7 +169,7 @@ getUnrollOrder(unsigned numLoops, Operation *op,
llvm::to_vector(llvm::seq<int64_t>(0, static_cast<int64_t>(numLoops)));
if (options.traversalOrderCallback != nullptr) {
Optional<SmallVector<int64_t>> order = options.traversalOrderCallback(op);
if (order.hasValue()) {
if (order) {
loopOrder = std::move(*order);
}
}


@ -1013,7 +1013,7 @@ void SSANameState::printValueID(Value value, bool printResultNo,
stream << nameIt->second;
}
if (resultNo.hasValue() && printResultNo)
if (resultNo && printResultNo)
stream << '#' << resultNo;
}


@ -460,7 +460,7 @@ mlir::isRankReducedType(ShapedType originalType,
computeRankReductionMask(originalShape, candidateReducedShape);
// Sizes cannot be matched in case empty vector is returned.
if (!optionalUnusedDimsMask.hasValue())
if (!optionalUnusedDimsMask)
return SliceVerificationResult::SizeMismatch;
if (originalShapedType.getElementType() !=


@ -599,7 +599,7 @@ AffineExpr AffineParser::parseAffineConstraint(bool *isEq) {
if (consumeIf(Token::greater) && consumeIf(Token::equal) &&
getToken().is(Token::integer)) {
auto dim = getToken().getUnsignedIntegerValue();
if (dim.hasValue() && dim.getValue() == 0) {
if (dim && *dim == 0) {
consumeToken(Token::integer);
*isEq = false;
return expr;
@ -610,7 +610,7 @@ AffineExpr AffineParser::parseAffineConstraint(bool *isEq) {
if (consumeIf(Token::equal) && consumeIf(Token::equal) &&
getToken().is(Token::integer)) {
auto dim = getToken().getUnsignedIntegerValue();
if (dim.hasValue() && dim.getValue() == 0) {
if (dim && *dim == 0) {
consumeToken(Token::integer);
*isEq = true;
return expr;


@ -306,7 +306,7 @@ public:
// Check for a floating point value.
if (curTok.is(Token::floatliteral)) {
auto val = curTok.getFloatingPointValue();
if (!val.hasValue())
if (!val)
return emitError(loc, "floating point value too large");
parser.consumeToken(Token::floatliteral);
result = isNegative ? -*val : *val;


@ -312,7 +312,7 @@ ParseResult Parser::parseAttributeDict(NamedAttrList &attributes) {
/// Parse a float attribute.
Attribute Parser::parseFloatAttr(Type type, bool isNegative) {
auto val = getToken().getFloatingPointValue();
if (!val.hasValue())
if (!val)
return (emitError("floating point value too large for attribute"), nullptr);
consumeToken(Token::floatliteral);
if (!type) {
@ -517,7 +517,7 @@ DenseElementsAttr TensorLiteralParser::getAttr(SMLoc loc,
Type eltType = type.getElementType();
// Check to see if we parse the literal from a hex string.
if (hexStorage.hasValue() &&
if (hexStorage &&
(eltType.isIntOrIndexOrFloat() || eltType.isa<ComplexType>()))
return getHexAttr(loc, type);
@ -530,7 +530,7 @@ DenseElementsAttr TensorLiteralParser::getAttr(SMLoc loc,
}
// Handle the case where no elements were parsed.
if (!hexStorage.hasValue() && storage.empty() && type.getNumElements()) {
if (!hexStorage && storage.empty() && type.getNumElements()) {
p.emitError(loc) << "parsed zero elements, but type (" << type
<< ") expected at least 1";
return nullptr;
@ -648,7 +648,7 @@ TensorLiteralParser::getFloatAttrElements(SMLoc loc, FloatType eltTy,
// Build the float values from tokens.
auto val = token.getFloatingPointValue();
if (!val.hasValue())
if (!val)
return p.emitError("floating point value too large for attribute");
APFloat apVal(isNegative ? -*val : *val);


@ -104,7 +104,7 @@ ParseResult Parser::parseNameOrFileLineColLocation(LocationAttr &loc) {
return emitWrongTokenError(
"expected integer line number in FileLineColLoc");
auto line = getToken().getUnsignedIntegerValue();
if (!line.hasValue())
if (!line)
return emitWrongTokenError(
"expected integer line number in FileLineColLoc");
consumeToken(Token::integer);


@ -275,7 +275,7 @@ ParseResult Parser::parseFloatFromIntegerLiteral(
}
Optional<uint64_t> value = tok.getUInt64IntegerValue();
if (!value.hasValue())
if (!value)
return emitError(loc, "hexadecimal float constant out of range for type");
if (&semantics == &APFloat::IEEEdouble()) {
@ -949,7 +949,7 @@ ParseResult OperationParser::parseOperation() {
// Check that number of results is > 0.
auto val = getToken().getUInt64IntegerValue();
if (!val.hasValue() || val.getValue() < 1)
if (!val || *val < 1)
return emitError(
"expected named operation to have at least 1 result");
consumeToken(Token::integer);
@ -1691,7 +1691,7 @@ OperationParser::parseCustomOperation(ArrayRef<ResultRecord> resultIDs) {
Optional<Dialect::ParseOpHook> dialectHook;
if (Dialect *dialect = opNameInfo->getDialect())
dialectHook = dialect->getParseOperationHook(opName);
if (!dialectHook.hasValue()) {
if (!dialectHook) {
InFlightDiagnostic diag =
emitError(opLoc) << "custom op '" << originalOpName << "' is unknown";
if (originalOpName != opName)


@ -367,7 +367,7 @@ void Operator::populateTypeInferenceInfo(
for (auto me = ecs.member_end(); mi != me; ++mi) {
if (*mi < 0) {
auto tc = getResultTypeConstraint(i);
if (tc.getBuilderCall().hasValue()) {
if (tc.getBuilderCall()) {
resultTypeMapping[i].emplace_back(tc);
found = true;
}


@ -219,7 +219,7 @@ static void setLoopMetadata(Operation &opInst, llvm::Instruction &llvmInst,
auto loopAttr = attr.cast<DictionaryAttr>();
auto parallelAccessGroup =
loopAttr.getNamed(LLVMDialect::getParallelAccessAttrName());
if (parallelAccessGroup.hasValue()) {
if (parallelAccessGroup) {
SmallVector<llvm::Metadata *> parallelAccess;
parallelAccess.push_back(
llvm::MDString::get(ctx, "llvm.loop.parallel_accesses"));


@ -875,7 +875,7 @@ LogicalResult ModuleTranslation::convertOneFunction(LLVMFuncOp func) {
}
// Check the personality and set it.
if (func.getPersonality().hasValue()) {
if (func.getPersonality()) {
llvm::Type *ty = llvm::Type::getInt8PtrTy(llvmFunc->getContext());
if (llvm::Constant *pfunc = getLLVMConstant(ty, func.getPersonalityAttr(),
func.getLoc(), *this))


@ -24,7 +24,7 @@ namespace mlir {
LogicalResult spirv::serialize(spirv::ModuleOp module,
SmallVectorImpl<uint32_t> &binary,
const SerializationOptions &options) {
if (!module.vce_triple().hasValue())
if (!module.vce_triple())
return module.emitError(
"module must have 'vce_triple' attribute to be serializeable");


@ -37,7 +37,7 @@ static bool mapOptOrNull(const llvm::json::Value &params,
// Field is missing or null.
auto *v = o->get(prop);
if (!v || v->getAsNull().hasValue())
if (!v || v->getAsNull())
return true;
return fromJSON(*v, out, path.field(prop));
}
@ -545,7 +545,7 @@ llvm::json::Value mlir::lsp::toJSON(const MarkupContent &mc) {
llvm::json::Value mlir::lsp::toJSON(const Hover &hover) {
llvm::json::Object result{{"contents", toJSON(hover.contents)}};
if (hover.range.hasValue())
if (hover.range)
result["range"] = toJSON(*hover.range);
return std::move(result);
}


@ -34,7 +34,7 @@ static bool mapOptOrNull(const llvm::json::Value &params,
// Field is missing or null.
auto *v = o->get(prop);
if (!v || v->getAsNull().hasValue())
if (!v || v->getAsNull())
return true;
return fromJSON(*v, out, path.field(prop));
}


@ -127,7 +127,7 @@ void VectorizerTestPass::testVectorShapeRatio(llvm::raw_ostream &outs) {
// future we can always extend.
auto superVectorType = opInst->getResult(0).getType().cast<VectorType>();
auto ratio = shapeRatio(superVectorType, subVectorType);
if (!ratio.hasValue()) {
if (!ratio) {
opInst->emitRemark("NOT MATCHED");
} else {
outs << "\nmatched: " << *opInst << " with shape ratio: ";


@ -115,7 +115,7 @@ mlir::test::TestProduceParamOrForwardOperandOp::apply(
}
LogicalResult mlir::test::TestProduceParamOrForwardOperandOp::verify() {
if (getParameter().hasValue() ^ (getNumOperands() != 1))
if (getParameter().has_value() ^ (getNumOperands() != 1))
return emitOpError() << "expects either a parameter or an operand";
return success();
}


@ -413,7 +413,7 @@ struct TestVectorDistributePatterns
perm, ctx);
Optional<mlir::vector::DistributeOps> ops = distributPointwiseVectorOp(
builder, op.getOperation(), ids, mul, map);
if (ops.hasValue()) {
if (ops) {
SmallPtrSet<Operation *, 1> extractOp({ops->extract, ops->insert});
op.getResult().replaceAllUsesExcept(ops->insert.getResult(),
extractOp);
@ -474,7 +474,7 @@ struct TestVectorToLoopPatterns
Optional<mlir::vector::DistributeOps> ops = distributPointwiseVectorOp(
builder, op.getOperation(), {forOp.getInductionVar()}, {multiplicity},
map);
if (ops.hasValue()) {
if (ops) {
SmallPtrSet<Operation *, 1> extractOp({ops->extract, ops->insert});
op.getResult().replaceAllUsesExcept(ops->insert.getResult(), extractOp);
}


@ -1001,7 +1001,7 @@ static void genCustomDirectiveParser(CustomDirective *dir, MethodBody &body) {
} else if (auto *operand = dyn_cast<OperandVariable>(param)) {
const NamedTypeConstraint *var = operand->getVar();
if (var->isOptional()) {
body << llvm::formatv(" if ({0}Operand.hasValue())\n"
body << llvm::formatv(" if ({0}Operand.has_value())\n"
" {0}Operands.push_back(*{0}Operand);\n",
var->name);
} else if (var->isVariadicOfVariadic()) {