[SVE] Bail from VectorUtils heuristics for scalable vectors
Bail from maskIsAllZeroOrUndef and maskIsAllOneOrUndef prior to iterating over the number of elements for scalable vectors.

Assert that the mask type is not scalable in possiblyDemandedEltsInMask.

Assert that the types are correct in all three functions.

Reviewed By: efriedma

Differential Revision: https://reviews.llvm.org/D87424
parent 783e28a508
commit 7ddfd9b3eb
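A scalable vector type such as <vscale x 16 x i1> only carries a minimum element count that is multiplied by an unknown runtime factor, so the element-by-element scan used by these heuristics cannot run and the functions must give a conservative answer instead. The sketch below is a standalone illustration of that bail-out pattern, not the patch itself; the helper name allMaskLanesKnownFalse is invented for the example, and it assumes an LLVM 11 (or later) development tree to compile against.

// Standalone sketch of the bail-out pattern for scalable vectors.
// allMaskLanesKnownFalse is a made-up stand-in for maskIsAllZeroOrUndef.
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static bool allMaskLanesKnownFalse(Constant *Mask) {
  if (Mask->isNullValue() || isa<UndefValue>(Mask))
    return true;
  // A scalable vector has no compile-time element count, so bail out
  // conservatively before the FixedVectorType cast below.
  if (isa<ScalableVectorType>(Mask->getType()))
    return false;
  unsigned NumElts = cast<FixedVectorType>(Mask->getType())->getNumElements();
  for (unsigned I = 0; I != NumElts; ++I) {
    Constant *Elt = Mask->getAggregateElement(I);
    if (Elt && (Elt->isNullValue() || isa<UndefValue>(Elt)))
      continue;
    return false;
  }
  return true;
}

int main() {
  LLVMContext Ctx;
  Type *I1 = Type::getInt1Ty(Ctx);

  // <4 x i1> zeroinitializer: every lane is provably inactive.
  Constant *FixedZero = Constant::getNullValue(FixedVectorType::get(I1, 4));
  // <vscale x 16 x i1> all-ones splat: lanes cannot be enumerated.
  Constant *ScalableOnes =
      Constant::getAllOnesValue(ScalableVectorType::get(I1, 16));

  outs() << "fixed zero mask:    "
         << (allMaskLanesKnownFalse(FixedZero) ? "true" : "false") << "\n";
  outs() << "scalable ones mask: "
         << (allMaskLanesKnownFalse(ScalableOnes) ? "true" : "false") << "\n";
  return 0;
}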
@@ -544,20 +544,20 @@ createSequentialMask(unsigned Start, unsigned NumInts, unsigned NumUndefs);
 /// elements, it will be padded with undefs.
 Value *concatenateVectors(IRBuilderBase &Builder, ArrayRef<Value *> Vecs);
 
-/// Given a mask vector of the form <Y x i1>, Return true if all of the
-/// elements of this predicate mask are false or undef. That is, return true
-/// if all lanes can be assumed inactive.
+/// Given a mask vector of i1, Return true if all of the elements of this
+/// predicate mask are known to be false or undef. That is, return true if all
+/// lanes can be assumed inactive.
 bool maskIsAllZeroOrUndef(Value *Mask);
 
-/// Given a mask vector of the form <Y x i1>, Return true if all of the
-/// elements of this predicate mask are true or undef. That is, return true
-/// if all lanes can be assumed active.
+/// Given a mask vector of i1, Return true if all of the elements of this
+/// predicate mask are known to be true or undef. That is, return true if all
+/// lanes can be assumed active.
 bool maskIsAllOneOrUndef(Value *Mask);
 
 /// Given a mask vector of the form <Y x i1>, return an APInt (of bitwidth Y)
 /// for each lane which may be active.
 APInt possiblyDemandedEltsInMask(Value *Mask);
 
 /// The group of interleaved loads/stores sharing the same stride and
 /// close to each other.
 ///
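For the three helpers documented above, a short driver like the following shows the intended behavior on small constant masks (all lanes true-or-undef, all lanes false, and the per-lane demanded bits). This is an illustrative sketch only, assuming an LLVM development tree to compile and link against (LLVMCore plus the Analysis library that provides VectorUtils); it is not part of the patch.

// Illustrative driver for the VectorUtils mask helpers (not part of the patch).
#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Type *I1 = Type::getInt1Ty(Ctx);

  // <4 x i1> <true, undef, true, true>: every lane is true or undef.
  Constant *TrueOrUndef = ConstantVector::get(
      {ConstantInt::getTrue(Ctx), UndefValue::get(I1),
       ConstantInt::getTrue(Ctx), ConstantInt::getTrue(Ctx)});

  // <4 x i1> zeroinitializer: every lane is false.
  Constant *AllFalse = Constant::getNullValue(FixedVectorType::get(I1, 4));

  outs() << "maskIsAllOneOrUndef(<true,undef,true,true>): "
         << (maskIsAllOneOrUndef(TrueOrUndef) ? "true" : "false") << "\n";
  outs() << "maskIsAllZeroOrUndef(zeroinitializer):       "
         << (maskIsAllZeroOrUndef(AllFalse) ? "true" : "false") << "\n";

  // possiblyDemandedEltsInMask returns one bit per lane that may be active;
  // here no lane is known-false, so all four bits are expected to be set.
  APInt Demanded = possiblyDemandedEltsInMask(TrueOrUndef);
  outs() << "possibly demanded lanes: " << Demanded.countPopulation()
         << " of " << Demanded.getBitWidth() << "\n";
  return 0;
}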
@@ -863,11 +863,19 @@ Value *llvm::concatenateVectors(IRBuilderBase &Builder,
 }
 
 bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
+  assert(isa<VectorType>(Mask->getType()) &&
+         isa<IntegerType>(Mask->getType()->getScalarType()) &&
+         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
+             1 &&
+         "Mask must be a vector of i1");
+
   auto *ConstMask = dyn_cast<Constant>(Mask);
   if (!ConstMask)
     return false;
   if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
     return true;
+  if (isa<ScalableVectorType>(ConstMask->getType()))
+    return false;
   for (unsigned
            I = 0,
            E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
@@ -882,11 +890,19 @@ bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
 
 bool llvm::maskIsAllOneOrUndef(Value *Mask) {
+  assert(isa<VectorType>(Mask->getType()) &&
+         isa<IntegerType>(Mask->getType()->getScalarType()) &&
+         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
+             1 &&
+         "Mask must be a vector of i1");
+
   auto *ConstMask = dyn_cast<Constant>(Mask);
   if (!ConstMask)
     return false;
   if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
     return true;
+  if (isa<ScalableVectorType>(ConstMask->getType()))
+    return false;
   for (unsigned
            I = 0,
            E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
@@ -902,6 +918,11 @@ bool llvm::maskIsAllOneOrUndef(Value *Mask) {
 /// TODO: This is a lot like known bits, but for
 /// vectors. Is there something we can common this with?
 APInt llvm::possiblyDemandedEltsInMask(Value *Mask) {
+  assert(isa<FixedVectorType>(Mask->getType()) &&
+         isa<IntegerType>(Mask->getType()->getScalarType()) &&
+         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
+             1 &&
+         "Mask must be a fixed width vector of i1");
 
   const unsigned VWidth =
       cast<FixedVectorType>(Mask->getType())->getNumElements();
@@ -319,11 +319,14 @@ Instruction *InstCombinerImpl::simplifyMaskedStore(IntrinsicInst &II) {
     return new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
   }
 
+  if (isa<ScalableVectorType>(ConstMask->getType()))
+    return nullptr;
+
   // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
   APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
   APInt UndefElts(DemandedElts.getBitWidth(), 0);
-  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0),
-                                            DemandedElts, UndefElts))
+  if (Value *V =
+          SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts, UndefElts))
     return replaceOperand(II, 0, V);
 
   return nullptr;
@@ -355,14 +358,17 @@ Instruction *InstCombinerImpl::simplifyMaskedScatter(IntrinsicInst &II) {
   if (ConstMask->isNullValue())
     return eraseInstFromFunction(II);
 
+  if (isa<ScalableVectorType>(ConstMask->getType()))
+    return nullptr;
+
   // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
   APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
   APInt UndefElts(DemandedElts.getBitWidth(), 0);
-  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(0),
-                                            DemandedElts, UndefElts))
+  if (Value *V =
+          SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts, UndefElts))
     return replaceOperand(II, 0, V);
-  if (Value *V = SimplifyDemandedVectorElts(II.getOperand(1),
-                                            DemandedElts, UndefElts))
+  if (Value *V =
+          SimplifyDemandedVectorElts(II.getOperand(1), DemandedElts, UndefElts))
     return replaceOperand(II, 1, V);
 
   return nullptr;
@@ -0,0 +1,21 @@
+; RUN: opt -S -instcombine < %s | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64-unknown-linux-gnu"
+
+; This test checks that instcombine does not crash while invoking
+; maskIsAllOneOrUndef, maskIsAllZeroOrUndef, or possiblyDemandedEltsInMask.
+
+; CHECK-LABEL: novel_algorithm
+; CHECK: unreachable
+define void @novel_algorithm() {
+entry:
+  %a = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>* undef, i32 1, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> undef, i1 true, i32 0), <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x i8> undef)
+  %b = add <vscale x 16 x i8> undef, %a
+  call void @llvm.masked.store.nxv16i8.p0nxv16i8(<vscale x 16 x i8> %b, <vscale x 16 x i8>* undef, i32 1, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> undef, i1 true, i32 0), <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer))
+  unreachable
+}
+
+declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0nxv16i8(<vscale x 16 x i8>*, i32 immarg, <vscale x 16 x i1>, <vscale x 16 x i8>)
+
+declare void @llvm.masked.store.nxv16i8.p0nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>*, i32 immarg, <vscale x 16 x i1>)