Apply clang-tidy fixes for readability-simplify-boolean-expr to MLIR (NFC)

Reviewed By: rriddle, Mogball

Differential Revision: https://reviews.llvm.org/D116253
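For context, readability-simplify-boolean-expr rewrites boolean logic that collapses into a single expression: an if that returns true followed by return false becomes a direct return of the condition, comparisons such as x == false become !x, and cond ? true : false becomes plain cond. A minimal sketch of the shape of these fixes (the helper names below are illustrative, not from this commit):

    // Before: the check flags the redundant if/return pair.
    bool isReadyVerbose(int state) {
      if (state == 1)
        return true;
      return false;
    }

    // After: the fix returns the condition directly.
    bool isReady(int state) { return state == 1; }

Each hunk below is an instance of this rewrite; none changes observable behavior, hence the NFC tag.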
@@ -564,7 +564,7 @@ LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth,
     for (auto id : ids) {
       AffineForOp iv;
       if ((iv = getForInductionVarOwner(id)) &&
-          llvm::is_contained(enclosingIVs, iv) == false) {
+          !llvm::is_contained(enclosingIVs, iv)) {
         cst.projectOut(id);
       }
     }

@@ -1014,11 +1014,7 @@ static bool createReassociationMapsForCollapse(
 
   // If both iterators didn't reach the end, we have leftover dimentions which
   // implies that we have a mismatch in shape.
-  if (currSrcDim != srcShape.size() || currDstDim != dstShape.size()) {
-    return false;
-  }
-
-  return true;
+  return !(currSrcDim != srcShape.size() || currDstDim != dstShape.size());
 }
 
 namespace {

@@ -84,9 +84,7 @@ static bool transferReadSupportsMMAMatrixType(vector::TransferReadOp readOp) {
                                                 readOp.getContext());
   // TODO: Support transpose once it is added to GPU dialect ops.
   // For now we only support (d0, d1) -> (d0, d1) and (d0, d1) -> (0, d1).
-  if (!map.isMinorIdentity() && map != broadcastInnerDim)
-    return false;
-  return true;
+  return !(!map.isMinorIdentity() && map != broadcastInnerDim);
 }
 
 // Return true if the transfer op can be converted to a MMA matrix store.

@@ -472,10 +472,7 @@ wouldCreateWriteToNonWritableBuffer(OpOperand &opOperand, OpResult opResult,
   bool hasWrite = aliasesInPlaceWrite(opResult, aliasInfo, state) ||
                   aliasesInPlaceWrite(opOperand.get(), aliasInfo, state) ||
                   state.bufferizesToMemoryWrite(opOperand);
-  if (!hasWrite)
-    return false;
-
-  return true;
+  return hasWrite;
 }
 
 //===----------------------------------------------------------------------===//

@@ -425,12 +425,10 @@ LogicalResult mlir::linalg::comprehensive_bufferize::linalg_ext::
         // TODO: Support cases such as extract_slice(init_tensor).
         SmallVector<OpOperand *> opOperands =
             state.getAliasingOpOperand(opResult);
-        if (!llvm::all_of(opOperands, [&](OpOperand *operand) {
-              return aliasInfo.areEquivalentBufferizedValues(operand->get(),
-                                                             opResult);
-            }))
-          return true;
-        return false;
+        return !llvm::all_of(opOperands, [&](OpOperand *operand) {
+          return aliasInfo.areEquivalentBufferizedValues(operand->get(),
+                                                         opResult);
+        });
       });
 
   // Replace only if the reverse use-def chain ends at exactly one

@@ -543,14 +543,11 @@ struct LinalgDetensorize : public LinalgDetensorizeBase<LinalgDetensorize> {
     if (op->hasTrait<OpTrait::FunctionLike>()) {
       auto &body = function_like_impl::getFunctionBody(op);
       return llvm::all_of(llvm::drop_begin(body, 1), [&](Block &block) {
-        if (llvm::any_of(
-                blockArgsToDetensor, [&](BlockArgument blockArgument) {
-                  return blockArgument.getOwner() == &block &&
-                         !typeConverter.isLegal(blockArgument.getType());
-                })) {
-          return false;
-        }
-        return true;
+        return !llvm::any_of(
+            blockArgsToDetensor, [&](BlockArgument blockArgument) {
+              return blockArgument.getOwner() == &block &&
+                     !typeConverter.isLegal(blockArgument.getType());
+            });
       });
     }
 
@@ -428,10 +428,7 @@ bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
 
     auto aMemSpace = (aT) ? aT.getMemorySpace() : uaT.getMemorySpace();
     auto bMemSpace = (bT) ? bT.getMemorySpace() : ubT.getMemorySpace();
-    if (aMemSpace != bMemSpace)
-      return false;
-
-    return true;
+    return aMemSpace == bMemSpace;
   }
 
   return false;

@@ -146,10 +146,7 @@ bool mlir::isLegalForReturnOpTypeConversionPattern(Operation *op,
 
   // ReturnLike operations have to be legalized with their parent. For
   // return this is handled, for other ops they remain as is.
-  if (op->hasTrait<OpTrait::ReturnLike>())
-    return true;
-
-  return false;
+  return op->hasTrait<OpTrait::ReturnLike>();
 }
 
 bool mlir::isNotBranchOpInterfaceOrReturnLikeOp(Operation *op) {

@@ -302,11 +302,7 @@ bool matcher::operatesOnSuperVectorsOf(Operation &op,
   // This could be useful information if we wanted to reshape at the level of
   // the vector type (but we would have to look at the compute and distinguish
   // between parallel, reduction and possibly other cases.
-  if (!ratio.hasValue()) {
-    return false;
-  }
-
-  return true;
+  return ratio.hasValue();
 }
 
 bool mlir::isDisjointTransferIndices(VectorTransferOpInterface transferA,

@@ -92,13 +92,11 @@ void NormalizeMemRefs::runOnOperation() {
 /// are satisfied will the value become a candidate for replacement.
 /// TODO: Extend this for DimOps.
 static bool isMemRefNormalizable(Value::user_range opUsers) {
-  if (llvm::any_of(opUsers, [](Operation *op) {
-        if (op->hasTrait<OpTrait::MemRefsNormalizable>())
-          return false;
-        return true;
-      }))
-    return false;
-  return true;
+  return !llvm::any_of(opUsers, [](Operation *op) {
+    if (op->hasTrait<OpTrait::MemRefsNormalizable>())
+      return false;
+    return true;
+  });
 }
 
 /// Set all the calling functions and the callees of the function as not

@@ -54,8 +54,7 @@ static void getLoadAndStoreMemRefAccesses(Operation *opA,
 static bool isDependentLoadOrStoreOp(Operation *op,
                                      DenseMap<Value, bool> &values) {
   if (auto loadOp = dyn_cast<AffineReadOpInterface>(op)) {
-    return values.count(loadOp.getMemRef()) > 0 &&
-           values[loadOp.getMemRef()] == true;
+    return values.count(loadOp.getMemRef()) > 0 && values[loadOp.getMemRef()];
   }
   if (auto storeOp = dyn_cast<AffineWriteOpInterface>(op)) {
     return values.count(storeOp.getMemRef()) > 0;

@@ -1345,9 +1345,7 @@ static bool areInnerBoundsInvariant(AffineForOp forOp) {
     }
     return WalkResult::advance();
   });
-  if (walkResult.wasInterrupted())
-    return false;
-  return true;
+  return !walkResult.wasInterrupted();
 }
 
 // Gathers all maximal sub-blocks of operations that do not themselves

@@ -71,7 +71,7 @@ ConvertTosaNegateOp::matchAndRewrite(Operation *op,
   double typeRangeMax = double(outputElementType.getStorageTypeMax() -
                                outputElementType.getZeroPoint()) *
                         outputElementType.getScale();
-  bool narrowRange = outputElementType.getStorageTypeMin() == 1 ? true : false;
+  bool narrowRange = outputElementType.getStorageTypeMin() == 1;
 
   auto dstQConstType = RankedTensorType::get(
       outputType.getShape(),

@@ -391,9 +391,7 @@ struct TestVectorToLoopPatterns
           type.getNumElements() % multiplicity != 0)
         return mlir::WalkResult::advance();
       auto filterAlloc = [](Operation *op) {
-        if (isa<arith::ConstantOp, memref::AllocOp, CallOp>(op))
-          return false;
-        return true;
+        return !isa<arith::ConstantOp, memref::AllocOp, CallOp>(op);
       };
       auto dependentOps = getSlice(op, filterAlloc);
       // Create a loop and move instructions from the Op slice into the loop.