diff --git a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index eab49b5f8b37..9ec2afb99d68 100644
--- a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -434,7 +434,7 @@ static bool processCallSite(CallSite CS, LazyValueInfo *LVI) {
   unsigned ArgNo = 0;
 
   if (auto *WO = dyn_cast<WithOverflowInst>(CS.getInstruction())) {
-    if (willNotOverflow(WO, LVI)) {
+    if (WO->getLHS()->getType()->isIntegerTy() && willNotOverflow(WO, LVI)) {
       processOverflowIntrinsic(WO);
       return true;
     }
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/overflows.ll b/llvm/test/Transforms/CorrelatedValuePropagation/overflows.ll
index 860ebafd0749..a0d268c059f9 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/overflows.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/overflows.ll
@@ -21,6 +21,8 @@
 declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32)
 declare { i8, i1 } @llvm.umul.with.overflow.i8(i8, i8)
 
+declare { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32>, <2 x i32>)
+
 declare i8 @llvm.uadd.sat.i8(i8, i8)
 declare i8 @llvm.sadd.sat.i8(i8, i8)
 declare i8 @llvm.usub.sat.i8(i8, i8)
@@ -731,6 +733,16 @@ define { i8, i1 } @signed_mul_constant_folding() {
   ret { i8, i1 } %mul
 }
 
+define { <2 x i32>, <2 x i1> } @uaddo_vec(<2 x i32> %a) {
+; CHECK-LABEL: @uaddo_vec(
+; CHECK-NEXT:    [[ADD:%.*]] = call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[A:%.*]], <2 x i32> <i32 1, i32 1>)
+; CHECK-NEXT:    ret { <2 x i32>, <2 x i1> } [[ADD]]
+;
+  %add = call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 1, i32 1>)
+  ret { <2 x i32>, <2 x i1> } %add
+}
+
+
 define i8 @uadd_sat_no_overflow(i8 %x) {
 ; CHECK-LABEL: @uadd_sat_no_overflow(
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8 [[X:%.*]], 100
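
Note (context for reviewers, not part of the patch): the new isIntegerTy() guard short-circuits before LVI is queried, because LazyValueInfo::getConstantRange only models scalar integers and asserts when handed a vector type such as <2 x i32>. A paraphrased sketch of the willNotOverflow helper this guard protects, roughly as it reads around this revision:

    // Paraphrased sketch; the exact body in tree may differ slightly.
    static bool willNotOverflow(WithOverflowInst *WO, LazyValueInfo *LVI) {
      // Both getConstantRange calls require a scalar integer operand,
      // which is why the caller now checks
      // WO->getLHS()->getType()->isIntegerTy() first.
      ConstantRange RRange =
          LVI->getConstantRange(WO->getRHS(), WO->getParent(), WO);
      // Compute the region of LHS values for which this binary op with
      // this RHS range is guaranteed not to wrap.
      ConstantRange NWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          WO->getBinaryOp(), RRange, WO->getNoWrapKind());
      ConstantRange LRange =
          LVI->getConstantRange(WO->getLHS(), WO->getParent(), WO);
      return NWRegion.contains(LRange);
    }

With the guard in place, vector with.overflow calls simply fall through untransformed, which is what the @uaddo_vec test above checks.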