diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 4f1d888ca0a2..1d3e26b93cb6 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -3505,15 +3505,15 @@ const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
 }
 
 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
+  // We can bypass creating a target-independent
+  // constant expression and then folding it back into a ConstantInt.
+  // This is just a compile-time optimization.
   if (isa<ScalableVectorType>(AllocTy)) {
     Constant *NullPtr = Constant::getNullValue(AllocTy->getPointerTo());
     Constant *One = ConstantInt::get(IntTy, 1);
     Constant *GEP = ConstantExpr::getGetElementPtr(AllocTy, NullPtr, One);
-    return getUnknown(ConstantExpr::getPtrToInt(GEP, IntTy));
+    return getSCEV(ConstantExpr::getPtrToInt(GEP, IntTy));
   }
-  // We can bypass creating a target-independent
-  // constant expression and then folding it back into a ConstantInt.
-  // This is just a compile-time optimization.
   return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
 }
 
@@ -6301,36 +6301,6 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
       return getSCEV(U->getOperand(0));
     break;
 
-  case Instruction::PtrToInt: {
-    // It's tempting to handle inttoptr and ptrtoint as no-ops,
-    // however this can lead to pointer expressions which cannot safely be
-    // expanded to GEPs because ScalarEvolution doesn't respect
-    // the GEP aliasing rules when simplifying integer expressions.
-    //
-    // However, given
-    //   %x = ???
-    //   %y = ptrtoint %x
-    //   %z = ptrtoint %x
-    // it is safe to say that %y and %z are the same thing.
-    //
-    // So instead of modelling the cast itself as unknown,
-    // since the casts are transparent within SCEV,
-    // we can at least model the casts original value as unknow instead.
-
-    // BUT, there's caveat. If we simply model %x as unknown, unrelated uses
-    // of %x will also see it as unknown, which is obviously bad.
-    // So we can only do this iff %x would be modelled as unknown anyways.
-    auto *OpSCEV = getSCEV(U->getOperand(0));
-    if (isa<SCEVUnknown>(OpSCEV))
-      return getTruncateOrZeroExtend(OpSCEV, U->getType());
-    // If we can model the operand, however, we must fallback to modelling
-    // the whole cast as unknown instead.
-    LLVM_FALLTHROUGH;
-  }
-  case Instruction::IntToPtr:
-    // We can't do this for inttoptr at all, however.
-    return getUnknown(V);
-
   case Instruction::SDiv:
     // If both operands are non-negative, this is just an udiv.
     if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
@@ -6345,6 +6315,11 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
       return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
     break;
 
+  // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
+  // lead to pointer expressions which cannot safely be expanded to GEPs,
+  // because ScalarEvolution doesn't respect the GEP aliasing rules when
+  // simplifying integer expressions.
+
   case Instruction::GetElementPtr:
     return createNodeForGEP(cast<GEPOperator>(U));
 
@@ -7976,7 +7951,7 @@ const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
 /// will return Constants for objects which aren't represented by a
 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
 /// Returns NULL if the SCEV isn't representable as a Constant.
-static Constant *BuildConstantFromSCEV(const SCEV *V, const DataLayout &DL) {
+static Constant *BuildConstantFromSCEV(const SCEV *V) {
   switch (static_cast<SCEVTypes>(V->getSCEVType())) {
   case scCouldNotCompute:
   case scAddRecExpr:
@@ -7987,47 +7962,32 @@ static Constant *BuildConstantFromSCEV(const SCEV *V, const DataLayout &DL) {
     return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
   case scSignExtend: {
     const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
-    if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand(), DL)) {
-      if (CastOp->getType()->isPointerTy())
-        // Note that for SExt, unlike ZExt/Trunc, it is incorrect to just call
-        // ConstantExpr::getPtrToInt() and be done with it, because PtrToInt
-        // will zero-extend (otherwise ZExt case wouldn't work). So we need to
-        // first cast to the same-bitwidth integer, and then SExt it.
-        CastOp = ConstantExpr::getPtrToInt(
-            CastOp, DL.getIntPtrType(CastOp->getType()));
-      // And now, we can actually perform the sign-extension.
+    if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
       return ConstantExpr::getSExt(CastOp, SS->getType());
-    }
     break;
   }
   case scZeroExtend: {
     const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
-    if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand(), DL)) {
-      if (!CastOp->getType()->isPointerTy())
-        return ConstantExpr::getZExt(CastOp, SZ->getType());
-      return ConstantExpr::getPtrToInt(CastOp, SZ->getType());
-    }
+    if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
+      return ConstantExpr::getZExt(CastOp, SZ->getType());
     break;
   }
   case scTruncate: {
     const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
-    if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand(), DL)) {
-      if (!CastOp->getType()->isPointerTy())
-        return ConstantExpr::getTrunc(CastOp, ST->getType());
-      return ConstantExpr::getPtrToInt(CastOp, ST->getType());
-    }
+    if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
+      return ConstantExpr::getTrunc(CastOp, ST->getType());
     break;
   }
   case scAddExpr: {
     const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
-    if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0), DL)) {
+    if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
       if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
         unsigned AS = PTy->getAddressSpace();
         Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
         C = ConstantExpr::getBitCast(C, DestPtrTy);
       }
       for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
-        Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i), DL);
+        Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
         if (!C2)
           return nullptr;
 
         // First pointer!
@@ -8059,11 +8019,11 @@ static Constant *BuildConstantFromSCEV(const SCEV *V, const DataLayout &DL) {
   }
   case scMulExpr: {
     const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
-    if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0), DL)) {
+    if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
       // Don't bother with pointers at all.
if (C->getType()->isPointerTy()) return nullptr; for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) { - Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i), DL); + Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i)); if (!C2 || C2->getType()->isPointerTy()) return nullptr; C = ConstantExpr::getMul(C, C2); } @@ -8073,8 +8033,8 @@ static Constant *BuildConstantFromSCEV(const SCEV *V, const DataLayout &DL) { } case scUDivExpr: { const SCEVUDivExpr *SU = cast(V); - if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS(), DL)) - if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS(), DL)) + if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS())) + if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS())) if (LHS->getType() == RHS->getType()) return ConstantExpr::getUDiv(LHS, RHS); break; @@ -8179,7 +8139,7 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { const SCEV *OpV = getSCEVAtScope(OrigV, L); MadeImprovement |= OrigV != OpV; - Constant *C = BuildConstantFromSCEV(OpV, getDataLayout()); + Constant *C = BuildConstantFromSCEV(OpV); if (!C) return V; if (C->getType() != Op->getType()) C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, diff --git a/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp b/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp index 3e280a66175c..2d71b0fff889 100644 --- a/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp +++ b/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp @@ -427,7 +427,7 @@ static bool willNotOverflow(ScalarEvolution *SE, Instruction::BinaryOps BinOp, : &ScalarEvolution::getZeroExtendExpr; // Check ext(LHS op RHS) == ext(LHS) op ext(RHS) - auto *NarrowTy = cast(SE->getEffectiveSCEVType(LHS->getType())); + auto *NarrowTy = cast(LHS->getType()); auto *WideTy = IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2); diff --git a/llvm/test/Analysis/ScalarEvolution/add-expr-pointer-operand-sorting.ll b/llvm/test/Analysis/ScalarEvolution/add-expr-pointer-operand-sorting.ll index e798e2715ba1..93a3bf4d4c37 100644 --- a/llvm/test/Analysis/ScalarEvolution/add-expr-pointer-operand-sorting.ll +++ b/llvm/test/Analysis/ScalarEvolution/add-expr-pointer-operand-sorting.ll @@ -33,9 +33,9 @@ define i32 @d(i32 %base) { ; CHECK-NEXT: %1 = load i32*, i32** @c, align 8 ; CHECK-NEXT: --> %1 U: full-set S: full-set Exits: <> LoopDispositions: { %for.cond: Variant } ; CHECK-NEXT: %sub.ptr.lhs.cast = ptrtoint i32* %1 to i64 -; CHECK-NEXT: --> %1 U: full-set S: full-set Exits: <> LoopDispositions: { %for.cond: Variant } +; CHECK-NEXT: --> %sub.ptr.lhs.cast U: full-set S: full-set Exits: <> LoopDispositions: { %for.cond: Variant } ; CHECK-NEXT: %sub.ptr.sub = sub i64 %sub.ptr.lhs.cast, ptrtoint ([1 x i32]* @b to i64) -; CHECK-NEXT: --> ((-1 * @b) + %1) U: full-set S: full-set Exits: <> LoopDispositions: { %for.cond: Variant } +; CHECK-NEXT: --> ((-1 * ptrtoint ([1 x i32]* @b to i64)) + %sub.ptr.lhs.cast) U: full-set S: full-set Exits: <> LoopDispositions: { %for.cond: Variant } ; CHECK-NEXT: %sub.ptr.div = sdiv exact i64 %sub.ptr.sub, 4 ; CHECK-NEXT: --> %sub.ptr.div U: full-set S: [-2305843009213693952,2305843009213693952) Exits: <> LoopDispositions: { %for.cond: Variant } ; CHECK-NEXT: %arrayidx1 = getelementptr inbounds [1 x i8], [1 x i8]* %arrayidx, i64 0, i64 %sub.ptr.div diff --git a/llvm/test/Analysis/ScalarEvolution/no-wrap-add-exprs.ll b/llvm/test/Analysis/ScalarEvolution/no-wrap-add-exprs.ll index eb669cab0c79..5a7bb3c9e5cd 100644 --- a/llvm/test/Analysis/ScalarEvolution/no-wrap-add-exprs.ll +++ 
b/llvm/test/Analysis/ScalarEvolution/no-wrap-add-exprs.ll @@ -170,14 +170,14 @@ define void @f3(i8* %x_addr, i8* %y_addr, i32* %tmp_addr) { %int5 = add i32 %int0, 5 %int.zext = zext i32 %int5 to i64 ; CHECK: %int.zext = zext i32 %int5 to i64 -; CHECK-NEXT: --> (1 + (zext i32 (4 + (trunc [16 x i8]* @z_addr to i32)) to i64)) U: [1,4294967294) S: [1,4294967297) +; CHECK-NEXT: --> (1 + (zext i32 (4 + %int0) to i64)) U: [1,4294967294) S: [1,4294967297) %ptr_noalign = bitcast [16 x i8]* @z_addr_noalign to i8* %int0_na = ptrtoint i8* %ptr_noalign to i32 %int5_na = add i32 %int0_na, 5 %int.zext_na = zext i32 %int5_na to i64 ; CHECK: %int.zext_na = zext i32 %int5_na to i64 -; CHECK-NEXT: --> (zext i32 (5 + (trunc [16 x i8]* @z_addr_noalign to i32)) to i64) U: [0,4294967296) S: [0,4294967296) +; CHECK-NEXT: --> (zext i32 (5 + %int0_na) to i64) U: [0,4294967296) S: [0,4294967296) %tmp = load i32, i32* %tmp_addr %mul = and i32 %tmp, -4 diff --git a/llvm/test/Analysis/ScalarEvolution/ptrtoint-constantexpr-loop.ll b/llvm/test/Analysis/ScalarEvolution/ptrtoint-constantexpr-loop.ll index edb4c1e8fb58..bf8f6340e599 100644 --- a/llvm/test/Analysis/ScalarEvolution/ptrtoint-constantexpr-loop.ll +++ b/llvm/test/Analysis/ScalarEvolution/ptrtoint-constantexpr-loop.ll @@ -13,31 +13,48 @@ declare void @use16(i16) define hidden i32* @trunc_ptr_to_i64(i8* %arg, i32* %arg10) { -; X64-LABEL: 'trunc_ptr_to_i64' -; X64-NEXT: Classifying expressions for: @trunc_ptr_to_i64 -; X64-NEXT: %tmp = phi i32 [ 0, %bb ], [ %tmp18, %bb17 ] -; X64-NEXT: --> {0,+,2}<%bb11> U: [0,-1) S: [-2147483648,2147483647) Exits: <> LoopDispositions: { %bb11: Computable } -; X64-NEXT: %tmp12 = getelementptr i8, i8* %arg, i64 ptrtoint ([0 x i8]* @global to i64) -; X64-NEXT: --> (@global + %arg) U: full-set S: full-set Exits: (@global + %arg) LoopDispositions: { %bb11: Invariant } -; X64-NEXT: %tmp13 = bitcast i8* %tmp12 to i32* -; X64-NEXT: --> (@global + %arg) U: full-set S: full-set Exits: (@global + %arg) LoopDispositions: { %bb11: Invariant } -; X64-NEXT: %tmp14 = load i32, i32* %tmp13, align 4 -; X64-NEXT: --> %tmp14 U: full-set S: full-set Exits: <> LoopDispositions: { %bb11: Variant } -; X64-NEXT: %tmp18 = add i32 %tmp, 2 -; X64-NEXT: --> {2,+,2}<%bb11> U: [0,-1) S: [-2147483648,2147483647) Exits: <> LoopDispositions: { %bb11: Computable } -; X64-NEXT: Determining loop execution counts for: @trunc_ptr_to_i64 -; X64-NEXT: Loop %bb11: Unpredictable backedge-taken count. -; X64-NEXT: Loop %bb11: Unpredictable max backedge-taken count. -; X64-NEXT: Loop %bb11: Unpredictable predicated backedge-taken count. 
+; PTR64_IDX64-LABEL: 'trunc_ptr_to_i64' +; PTR64_IDX64-NEXT: Classifying expressions for: @trunc_ptr_to_i64 +; PTR64_IDX64-NEXT: %tmp = phi i32 [ 0, %bb ], [ %tmp18, %bb17 ] +; PTR64_IDX64-NEXT: --> {0,+,2}<%bb11> U: [0,-1) S: [-2147483648,2147483647) Exits: <> LoopDispositions: { %bb11: Computable } +; PTR64_IDX64-NEXT: %tmp12 = getelementptr i8, i8* %arg, i64 ptrtoint ([0 x i8]* @global to i64) +; PTR64_IDX64-NEXT: --> (ptrtoint ([0 x i8]* @global to i64) + %arg) U: full-set S: full-set Exits: (ptrtoint ([0 x i8]* @global to i64) + %arg) LoopDispositions: { %bb11: Invariant } +; PTR64_IDX64-NEXT: %tmp13 = bitcast i8* %tmp12 to i32* +; PTR64_IDX64-NEXT: --> (ptrtoint ([0 x i8]* @global to i64) + %arg) U: full-set S: full-set Exits: (ptrtoint ([0 x i8]* @global to i64) + %arg) LoopDispositions: { %bb11: Invariant } +; PTR64_IDX64-NEXT: %tmp14 = load i32, i32* %tmp13, align 4 +; PTR64_IDX64-NEXT: --> %tmp14 U: full-set S: full-set Exits: <> LoopDispositions: { %bb11: Variant } +; PTR64_IDX64-NEXT: %tmp18 = add i32 %tmp, 2 +; PTR64_IDX64-NEXT: --> {2,+,2}<%bb11> U: [0,-1) S: [-2147483648,2147483647) Exits: <> LoopDispositions: { %bb11: Computable } +; PTR64_IDX64-NEXT: Determining loop execution counts for: @trunc_ptr_to_i64 +; PTR64_IDX64-NEXT: Loop %bb11: Unpredictable backedge-taken count. +; PTR64_IDX64-NEXT: Loop %bb11: Unpredictable max backedge-taken count. +; PTR64_IDX64-NEXT: Loop %bb11: Unpredictable predicated backedge-taken count. +; +; PTR64_IDX32-LABEL: 'trunc_ptr_to_i64' +; PTR64_IDX32-NEXT: Classifying expressions for: @trunc_ptr_to_i64 +; PTR64_IDX32-NEXT: %tmp = phi i32 [ 0, %bb ], [ %tmp18, %bb17 ] +; PTR64_IDX32-NEXT: --> {0,+,2}<%bb11> U: [0,-1) S: [-2147483648,2147483647) Exits: <> LoopDispositions: { %bb11: Computable } +; PTR64_IDX32-NEXT: %tmp12 = getelementptr i8, i8* %arg, i64 ptrtoint ([0 x i8]* @global to i64) +; PTR64_IDX32-NEXT: --> ((trunc i64 ptrtoint ([0 x i8]* @global to i64) to i32) + %arg) U: full-set S: full-set Exits: ((trunc i64 ptrtoint ([0 x i8]* @global to i64) to i32) + %arg) LoopDispositions: { %bb11: Invariant } +; PTR64_IDX32-NEXT: %tmp13 = bitcast i8* %tmp12 to i32* +; PTR64_IDX32-NEXT: --> ((trunc i64 ptrtoint ([0 x i8]* @global to i64) to i32) + %arg) U: full-set S: full-set Exits: ((trunc i64 ptrtoint ([0 x i8]* @global to i64) to i32) + %arg) LoopDispositions: { %bb11: Invariant } +; PTR64_IDX32-NEXT: %tmp14 = load i32, i32* %tmp13, align 4 +; PTR64_IDX32-NEXT: --> %tmp14 U: full-set S: full-set Exits: <> LoopDispositions: { %bb11: Variant } +; PTR64_IDX32-NEXT: %tmp18 = add i32 %tmp, 2 +; PTR64_IDX32-NEXT: --> {2,+,2}<%bb11> U: [0,-1) S: [-2147483648,2147483647) Exits: <> LoopDispositions: { %bb11: Computable } +; PTR64_IDX32-NEXT: Determining loop execution counts for: @trunc_ptr_to_i64 +; PTR64_IDX32-NEXT: Loop %bb11: Unpredictable backedge-taken count. +; PTR64_IDX32-NEXT: Loop %bb11: Unpredictable max backedge-taken count. +; PTR64_IDX32-NEXT: Loop %bb11: Unpredictable predicated backedge-taken count. 
; ; PTR16_IDX16-LABEL: 'trunc_ptr_to_i64' ; PTR16_IDX16-NEXT: Classifying expressions for: @trunc_ptr_to_i64 ; PTR16_IDX16-NEXT: %tmp = phi i32 [ 0, %bb ], [ %tmp18, %bb17 ] ; PTR16_IDX16-NEXT: --> {0,+,2}<%bb11> U: [0,-1) S: [-2147483648,2147483647) Exits: <> LoopDispositions: { %bb11: Computable } ; PTR16_IDX16-NEXT: %tmp12 = getelementptr i8, i8* %arg, i64 ptrtoint ([0 x i8]* @global to i64) -; PTR16_IDX16-NEXT: --> (@global + %arg) U: full-set S: full-set Exits: (@global + %arg) LoopDispositions: { %bb11: Invariant } +; PTR16_IDX16-NEXT: --> ((trunc i64 ptrtoint ([0 x i8]* @global to i64) to i16) + %arg) U: full-set S: full-set Exits: ((trunc i64 ptrtoint ([0 x i8]* @global to i64) to i16) + %arg) LoopDispositions: { %bb11: Invariant } ; PTR16_IDX16-NEXT: %tmp13 = bitcast i8* %tmp12 to i32* -; PTR16_IDX16-NEXT: --> (@global + %arg) U: full-set S: full-set Exits: (@global + %arg) LoopDispositions: { %bb11: Invariant } +; PTR16_IDX16-NEXT: --> ((trunc i64 ptrtoint ([0 x i8]* @global to i64) to i16) + %arg) U: full-set S: full-set Exits: ((trunc i64 ptrtoint ([0 x i8]* @global to i64) to i16) + %arg) LoopDispositions: { %bb11: Invariant } ; PTR16_IDX16-NEXT: %tmp14 = load i32, i32* %tmp13, align 4 ; PTR16_IDX16-NEXT: --> %tmp14 U: full-set S: full-set Exits: <> LoopDispositions: { %bb11: Variant } ; PTR16_IDX16-NEXT: %tmp18 = add i32 %tmp, 2 @@ -52,9 +69,9 @@ define hidden i32* @trunc_ptr_to_i64(i8* %arg, i32* %arg10) { ; PTR16_IDX32-NEXT: %tmp = phi i32 [ 0, %bb ], [ %tmp18, %bb17 ] ; PTR16_IDX32-NEXT: --> {0,+,2}<%bb11> U: [0,-1) S: [-2147483648,2147483647) Exits: <> LoopDispositions: { %bb11: Computable } ; PTR16_IDX32-NEXT: %tmp12 = getelementptr i8, i8* %arg, i64 ptrtoint ([0 x i8]* @global to i64) -; PTR16_IDX32-NEXT: --> (@global + %arg) U: [0,131071) S: full-set Exits: (@global + %arg) LoopDispositions: { %bb11: Invariant } +; PTR16_IDX32-NEXT: --> ((trunc i64 ptrtoint ([0 x i8]* @global to i64) to i32) + %arg) U: [0,131071) S: full-set Exits: ((trunc i64 ptrtoint ([0 x i8]* @global to i64) to i32) + %arg) LoopDispositions: { %bb11: Invariant } ; PTR16_IDX32-NEXT: %tmp13 = bitcast i8* %tmp12 to i32* -; PTR16_IDX32-NEXT: --> (@global + %arg) U: [0,131071) S: full-set Exits: (@global + %arg) LoopDispositions: { %bb11: Invariant } +; PTR16_IDX32-NEXT: --> ((trunc i64 ptrtoint ([0 x i8]* @global to i64) to i32) + %arg) U: [0,131071) S: full-set Exits: ((trunc i64 ptrtoint ([0 x i8]* @global to i64) to i32) + %arg) LoopDispositions: { %bb11: Invariant } ; PTR16_IDX32-NEXT: %tmp14 = load i32, i32* %tmp13, align 4 ; PTR16_IDX32-NEXT: --> %tmp14 U: full-set S: full-set Exits: <> LoopDispositions: { %bb11: Variant } ; PTR16_IDX32-NEXT: %tmp18 = add i32 %tmp, 2 @@ -88,9 +105,9 @@ define hidden i32* @trunc_ptr_to_i32(i8* %arg, i32* %arg10) { ; PTR64_IDX64-NEXT: %tmp = phi i32 [ 0, %bb ], [ %tmp18, %bb17 ] ; PTR64_IDX64-NEXT: --> {0,+,2}<%bb11> U: [0,-1) S: [-2147483648,2147483647) Exits: <> LoopDispositions: { %bb11: Computable } ; PTR64_IDX64-NEXT: %tmp12 = getelementptr i8, i8* %arg, i32 ptrtoint ([0 x i8]* @global to i32) -; PTR64_IDX64-NEXT: --> ((sext i32 (trunc [0 x i8]* @global to i32) to i64) + %arg) U: full-set S: full-set Exits: ((sext i32 (trunc [0 x i8]* @global to i32) to i64) + %arg) LoopDispositions: { %bb11: Invariant } +; PTR64_IDX64-NEXT: --> ((sext i32 ptrtoint ([0 x i8]* @global to i32) to i64) + %arg) U: full-set S: full-set Exits: ((sext i32 ptrtoint ([0 x i8]* @global to i32) to i64) + %arg) LoopDispositions: { %bb11: Invariant } ; PTR64_IDX64-NEXT: %tmp13 = bitcast 
i8* %tmp12 to i32* -; PTR64_IDX64-NEXT: --> ((sext i32 (trunc [0 x i8]* @global to i32) to i64) + %arg) U: full-set S: full-set Exits: ((sext i32 (trunc [0 x i8]* @global to i32) to i64) + %arg) LoopDispositions: { %bb11: Invariant } +; PTR64_IDX64-NEXT: --> ((sext i32 ptrtoint ([0 x i8]* @global to i32) to i64) + %arg) U: full-set S: full-set Exits: ((sext i32 ptrtoint ([0 x i8]* @global to i32) to i64) + %arg) LoopDispositions: { %bb11: Invariant } ; PTR64_IDX64-NEXT: %tmp14 = load i32, i32* %tmp13, align 4 ; PTR64_IDX64-NEXT: --> %tmp14 U: full-set S: full-set Exits: <> LoopDispositions: { %bb11: Variant } ; PTR64_IDX64-NEXT: %tmp18 = add i32 %tmp, 2 @@ -105,9 +122,9 @@ define hidden i32* @trunc_ptr_to_i32(i8* %arg, i32* %arg10) { ; PTR64_IDX32-NEXT: %tmp = phi i32 [ 0, %bb ], [ %tmp18, %bb17 ] ; PTR64_IDX32-NEXT: --> {0,+,2}<%bb11> U: [0,-1) S: [-2147483648,2147483647) Exits: <> LoopDispositions: { %bb11: Computable } ; PTR64_IDX32-NEXT: %tmp12 = getelementptr i8, i8* %arg, i32 ptrtoint ([0 x i8]* @global to i32) -; PTR64_IDX32-NEXT: --> (@global + %arg) U: full-set S: full-set Exits: (@global + %arg) LoopDispositions: { %bb11: Invariant } +; PTR64_IDX32-NEXT: --> (ptrtoint ([0 x i8]* @global to i32) + %arg) U: full-set S: full-set Exits: (ptrtoint ([0 x i8]* @global to i32) + %arg) LoopDispositions: { %bb11: Invariant } ; PTR64_IDX32-NEXT: %tmp13 = bitcast i8* %tmp12 to i32* -; PTR64_IDX32-NEXT: --> (@global + %arg) U: full-set S: full-set Exits: (@global + %arg) LoopDispositions: { %bb11: Invariant } +; PTR64_IDX32-NEXT: --> (ptrtoint ([0 x i8]* @global to i32) + %arg) U: full-set S: full-set Exits: (ptrtoint ([0 x i8]* @global to i32) + %arg) LoopDispositions: { %bb11: Invariant } ; PTR64_IDX32-NEXT: %tmp14 = load i32, i32* %tmp13, align 4 ; PTR64_IDX32-NEXT: --> %tmp14 U: full-set S: full-set Exits: <> LoopDispositions: { %bb11: Variant } ; PTR64_IDX32-NEXT: %tmp18 = add i32 %tmp, 2 @@ -122,9 +139,9 @@ define hidden i32* @trunc_ptr_to_i32(i8* %arg, i32* %arg10) { ; PTR16_IDX16-NEXT: %tmp = phi i32 [ 0, %bb ], [ %tmp18, %bb17 ] ; PTR16_IDX16-NEXT: --> {0,+,2}<%bb11> U: [0,-1) S: [-2147483648,2147483647) Exits: <> LoopDispositions: { %bb11: Computable } ; PTR16_IDX16-NEXT: %tmp12 = getelementptr i8, i8* %arg, i32 ptrtoint ([0 x i8]* @global to i32) -; PTR16_IDX16-NEXT: --> (@global + %arg) U: full-set S: full-set Exits: (@global + %arg) LoopDispositions: { %bb11: Invariant } +; PTR16_IDX16-NEXT: --> ((trunc i32 ptrtoint ([0 x i8]* @global to i32) to i16) + %arg) U: full-set S: full-set Exits: ((trunc i32 ptrtoint ([0 x i8]* @global to i32) to i16) + %arg) LoopDispositions: { %bb11: Invariant } ; PTR16_IDX16-NEXT: %tmp13 = bitcast i8* %tmp12 to i32* -; PTR16_IDX16-NEXT: --> (@global + %arg) U: full-set S: full-set Exits: (@global + %arg) LoopDispositions: { %bb11: Invariant } +; PTR16_IDX16-NEXT: --> ((trunc i32 ptrtoint ([0 x i8]* @global to i32) to i16) + %arg) U: full-set S: full-set Exits: ((trunc i32 ptrtoint ([0 x i8]* @global to i32) to i16) + %arg) LoopDispositions: { %bb11: Invariant } ; PTR16_IDX16-NEXT: %tmp14 = load i32, i32* %tmp13, align 4 ; PTR16_IDX16-NEXT: --> %tmp14 U: full-set S: full-set Exits: <> LoopDispositions: { %bb11: Variant } ; PTR16_IDX16-NEXT: %tmp18 = add i32 %tmp, 2 @@ -139,9 +156,9 @@ define hidden i32* @trunc_ptr_to_i32(i8* %arg, i32* %arg10) { ; PTR16_IDX32-NEXT: %tmp = phi i32 [ 0, %bb ], [ %tmp18, %bb17 ] ; PTR16_IDX32-NEXT: --> {0,+,2}<%bb11> U: [0,-1) S: [-2147483648,2147483647) Exits: <> LoopDispositions: { %bb11: Computable } ; 
PTR16_IDX32-NEXT: %tmp12 = getelementptr i8, i8* %arg, i32 ptrtoint ([0 x i8]* @global to i32) -; PTR16_IDX32-NEXT: --> (@global + %arg) U: [0,131071) S: full-set Exits: (@global + %arg) LoopDispositions: { %bb11: Invariant } +; PTR16_IDX32-NEXT: --> (ptrtoint ([0 x i8]* @global to i32) + %arg) U: [0,131071) S: full-set Exits: (ptrtoint ([0 x i8]* @global to i32) + %arg) LoopDispositions: { %bb11: Invariant } ; PTR16_IDX32-NEXT: %tmp13 = bitcast i8* %tmp12 to i32* -; PTR16_IDX32-NEXT: --> (@global + %arg) U: [0,131071) S: full-set Exits: (@global + %arg) LoopDispositions: { %bb11: Invariant } +; PTR16_IDX32-NEXT: --> (ptrtoint ([0 x i8]* @global to i32) + %arg) U: [0,131071) S: full-set Exits: (ptrtoint ([0 x i8]* @global to i32) + %arg) LoopDispositions: { %bb11: Invariant } ; PTR16_IDX32-NEXT: %tmp14 = load i32, i32* %tmp13, align 4 ; PTR16_IDX32-NEXT: --> %tmp14 U: full-set S: full-set Exits: <> LoopDispositions: { %bb11: Variant } ; PTR16_IDX32-NEXT: %tmp18 = add i32 %tmp, 2 @@ -170,31 +187,48 @@ bb17: ; preds = %bb11 br label %bb11 } define hidden i32* @trunc_ptr_to_i128(i8* %arg, i32* %arg10) { -; X64-LABEL: 'trunc_ptr_to_i128' -; X64-NEXT: Classifying expressions for: @trunc_ptr_to_i128 -; X64-NEXT: %tmp = phi i32 [ 0, %bb ], [ %tmp18, %bb17 ] -; X64-NEXT: --> {0,+,2}<%bb11> U: [0,-1) S: [-2147483648,2147483647) Exits: <> LoopDispositions: { %bb11: Computable } -; X64-NEXT: %tmp12 = getelementptr i8, i8* %arg, i128 ptrtoint ([0 x i8]* @global to i128) -; X64-NEXT: --> (@global + %arg) U: full-set S: full-set Exits: (@global + %arg) LoopDispositions: { %bb11: Invariant } -; X64-NEXT: %tmp13 = bitcast i8* %tmp12 to i32* -; X64-NEXT: --> (@global + %arg) U: full-set S: full-set Exits: (@global + %arg) LoopDispositions: { %bb11: Invariant } -; X64-NEXT: %tmp14 = load i32, i32* %tmp13, align 4 -; X64-NEXT: --> %tmp14 U: full-set S: full-set Exits: <> LoopDispositions: { %bb11: Variant } -; X64-NEXT: %tmp18 = add i32 %tmp, 2 -; X64-NEXT: --> {2,+,2}<%bb11> U: [0,-1) S: [-2147483648,2147483647) Exits: <> LoopDispositions: { %bb11: Computable } -; X64-NEXT: Determining loop execution counts for: @trunc_ptr_to_i128 -; X64-NEXT: Loop %bb11: Unpredictable backedge-taken count. -; X64-NEXT: Loop %bb11: Unpredictable max backedge-taken count. -; X64-NEXT: Loop %bb11: Unpredictable predicated backedge-taken count. 
+; PTR64_IDX64-LABEL: 'trunc_ptr_to_i128' +; PTR64_IDX64-NEXT: Classifying expressions for: @trunc_ptr_to_i128 +; PTR64_IDX64-NEXT: %tmp = phi i32 [ 0, %bb ], [ %tmp18, %bb17 ] +; PTR64_IDX64-NEXT: --> {0,+,2}<%bb11> U: [0,-1) S: [-2147483648,2147483647) Exits: <> LoopDispositions: { %bb11: Computable } +; PTR64_IDX64-NEXT: %tmp12 = getelementptr i8, i8* %arg, i128 ptrtoint ([0 x i8]* @global to i128) +; PTR64_IDX64-NEXT: --> ((trunc i128 ptrtoint ([0 x i8]* @global to i128) to i64) + %arg) U: full-set S: full-set Exits: ((trunc i128 ptrtoint ([0 x i8]* @global to i128) to i64) + %arg) LoopDispositions: { %bb11: Invariant } +; PTR64_IDX64-NEXT: %tmp13 = bitcast i8* %tmp12 to i32* +; PTR64_IDX64-NEXT: --> ((trunc i128 ptrtoint ([0 x i8]* @global to i128) to i64) + %arg) U: full-set S: full-set Exits: ((trunc i128 ptrtoint ([0 x i8]* @global to i128) to i64) + %arg) LoopDispositions: { %bb11: Invariant } +; PTR64_IDX64-NEXT: %tmp14 = load i32, i32* %tmp13, align 4 +; PTR64_IDX64-NEXT: --> %tmp14 U: full-set S: full-set Exits: <> LoopDispositions: { %bb11: Variant } +; PTR64_IDX64-NEXT: %tmp18 = add i32 %tmp, 2 +; PTR64_IDX64-NEXT: --> {2,+,2}<%bb11> U: [0,-1) S: [-2147483648,2147483647) Exits: <> LoopDispositions: { %bb11: Computable } +; PTR64_IDX64-NEXT: Determining loop execution counts for: @trunc_ptr_to_i128 +; PTR64_IDX64-NEXT: Loop %bb11: Unpredictable backedge-taken count. +; PTR64_IDX64-NEXT: Loop %bb11: Unpredictable max backedge-taken count. +; PTR64_IDX64-NEXT: Loop %bb11: Unpredictable predicated backedge-taken count. +; +; PTR64_IDX32-LABEL: 'trunc_ptr_to_i128' +; PTR64_IDX32-NEXT: Classifying expressions for: @trunc_ptr_to_i128 +; PTR64_IDX32-NEXT: %tmp = phi i32 [ 0, %bb ], [ %tmp18, %bb17 ] +; PTR64_IDX32-NEXT: --> {0,+,2}<%bb11> U: [0,-1) S: [-2147483648,2147483647) Exits: <> LoopDispositions: { %bb11: Computable } +; PTR64_IDX32-NEXT: %tmp12 = getelementptr i8, i8* %arg, i128 ptrtoint ([0 x i8]* @global to i128) +; PTR64_IDX32-NEXT: --> ((trunc i128 ptrtoint ([0 x i8]* @global to i128) to i32) + %arg) U: full-set S: full-set Exits: ((trunc i128 ptrtoint ([0 x i8]* @global to i128) to i32) + %arg) LoopDispositions: { %bb11: Invariant } +; PTR64_IDX32-NEXT: %tmp13 = bitcast i8* %tmp12 to i32* +; PTR64_IDX32-NEXT: --> ((trunc i128 ptrtoint ([0 x i8]* @global to i128) to i32) + %arg) U: full-set S: full-set Exits: ((trunc i128 ptrtoint ([0 x i8]* @global to i128) to i32) + %arg) LoopDispositions: { %bb11: Invariant } +; PTR64_IDX32-NEXT: %tmp14 = load i32, i32* %tmp13, align 4 +; PTR64_IDX32-NEXT: --> %tmp14 U: full-set S: full-set Exits: <> LoopDispositions: { %bb11: Variant } +; PTR64_IDX32-NEXT: %tmp18 = add i32 %tmp, 2 +; PTR64_IDX32-NEXT: --> {2,+,2}<%bb11> U: [0,-1) S: [-2147483648,2147483647) Exits: <> LoopDispositions: { %bb11: Computable } +; PTR64_IDX32-NEXT: Determining loop execution counts for: @trunc_ptr_to_i128 +; PTR64_IDX32-NEXT: Loop %bb11: Unpredictable backedge-taken count. +; PTR64_IDX32-NEXT: Loop %bb11: Unpredictable max backedge-taken count. +; PTR64_IDX32-NEXT: Loop %bb11: Unpredictable predicated backedge-taken count. 
; ; PTR16_IDX16-LABEL: 'trunc_ptr_to_i128' ; PTR16_IDX16-NEXT: Classifying expressions for: @trunc_ptr_to_i128 ; PTR16_IDX16-NEXT: %tmp = phi i32 [ 0, %bb ], [ %tmp18, %bb17 ] ; PTR16_IDX16-NEXT: --> {0,+,2}<%bb11> U: [0,-1) S: [-2147483648,2147483647) Exits: <> LoopDispositions: { %bb11: Computable } ; PTR16_IDX16-NEXT: %tmp12 = getelementptr i8, i8* %arg, i128 ptrtoint ([0 x i8]* @global to i128) -; PTR16_IDX16-NEXT: --> (@global + %arg) U: full-set S: full-set Exits: (@global + %arg) LoopDispositions: { %bb11: Invariant } +; PTR16_IDX16-NEXT: --> ((trunc i128 ptrtoint ([0 x i8]* @global to i128) to i16) + %arg) U: full-set S: full-set Exits: ((trunc i128 ptrtoint ([0 x i8]* @global to i128) to i16) + %arg) LoopDispositions: { %bb11: Invariant } ; PTR16_IDX16-NEXT: %tmp13 = bitcast i8* %tmp12 to i32* -; PTR16_IDX16-NEXT: --> (@global + %arg) U: full-set S: full-set Exits: (@global + %arg) LoopDispositions: { %bb11: Invariant } +; PTR16_IDX16-NEXT: --> ((trunc i128 ptrtoint ([0 x i8]* @global to i128) to i16) + %arg) U: full-set S: full-set Exits: ((trunc i128 ptrtoint ([0 x i8]* @global to i128) to i16) + %arg) LoopDispositions: { %bb11: Invariant } ; PTR16_IDX16-NEXT: %tmp14 = load i32, i32* %tmp13, align 4 ; PTR16_IDX16-NEXT: --> %tmp14 U: full-set S: full-set Exits: <> LoopDispositions: { %bb11: Variant } ; PTR16_IDX16-NEXT: %tmp18 = add i32 %tmp, 2 @@ -209,9 +243,9 @@ define hidden i32* @trunc_ptr_to_i128(i8* %arg, i32* %arg10) { ; PTR16_IDX32-NEXT: %tmp = phi i32 [ 0, %bb ], [ %tmp18, %bb17 ] ; PTR16_IDX32-NEXT: --> {0,+,2}<%bb11> U: [0,-1) S: [-2147483648,2147483647) Exits: <> LoopDispositions: { %bb11: Computable } ; PTR16_IDX32-NEXT: %tmp12 = getelementptr i8, i8* %arg, i128 ptrtoint ([0 x i8]* @global to i128) -; PTR16_IDX32-NEXT: --> (@global + %arg) U: [0,131071) S: full-set Exits: (@global + %arg) LoopDispositions: { %bb11: Invariant } +; PTR16_IDX32-NEXT: --> ((trunc i128 ptrtoint ([0 x i8]* @global to i128) to i32) + %arg) U: [0,131071) S: full-set Exits: ((trunc i128 ptrtoint ([0 x i8]* @global to i128) to i32) + %arg) LoopDispositions: { %bb11: Invariant } ; PTR16_IDX32-NEXT: %tmp13 = bitcast i8* %tmp12 to i32* -; PTR16_IDX32-NEXT: --> (@global + %arg) U: [0,131071) S: full-set Exits: (@global + %arg) LoopDispositions: { %bb11: Invariant } +; PTR16_IDX32-NEXT: --> ((trunc i128 ptrtoint ([0 x i8]* @global to i128) to i32) + %arg) U: [0,131071) S: full-set Exits: ((trunc i128 ptrtoint ([0 x i8]* @global to i128) to i32) + %arg) LoopDispositions: { %bb11: Invariant } ; PTR16_IDX32-NEXT: %tmp14 = load i32, i32* %tmp13, align 4 ; PTR16_IDX32-NEXT: --> %tmp14 U: full-set S: full-set Exits: <> LoopDispositions: { %bb11: Variant } ; PTR16_IDX32-NEXT: %tmp18 = add i32 %tmp, 2 @@ -241,49 +275,27 @@ bb17: ; preds = %bb11 } define void @zext_ptr_to_i32(i32 %arg, i32 %arg6) { -; PTR64_IDX64-LABEL: 'zext_ptr_to_i32' -; PTR64_IDX64-NEXT: Classifying expressions for: @zext_ptr_to_i32 -; PTR64_IDX64-NEXT: %tmp = sub i32 %arg, ptrtoint ([0 x i8]* @global to i32) -; PTR64_IDX64-NEXT: --> ((-1 * (trunc [0 x i8]* @global to i32)) + %arg) U: full-set S: full-set Exits: ((-1 * (trunc [0 x i8]* @global to i32)) + %arg) LoopDispositions: { %bb7: Invariant } -; PTR64_IDX64-NEXT: %tmp9 = select i1 %tmp8, i16 0, i16 1 -; PTR64_IDX64-NEXT: --> %tmp9 U: [0,2) S: [-2,2) Exits: <> LoopDispositions: { %bb7: Variant } -; PTR64_IDX64-NEXT: Determining loop execution counts for: @zext_ptr_to_i32 -; PTR64_IDX64-NEXT: Loop %bb7: Unpredictable backedge-taken count. 
-; PTR64_IDX64-NEXT: Loop %bb7: Unpredictable max backedge-taken count. -; PTR64_IDX64-NEXT: Loop %bb7: Unpredictable predicated backedge-taken count. +; X64-LABEL: 'zext_ptr_to_i32' +; X64-NEXT: Classifying expressions for: @zext_ptr_to_i32 +; X64-NEXT: %tmp = sub i32 %arg, ptrtoint ([0 x i8]* @global to i32) +; X64-NEXT: --> ((-1 * ptrtoint ([0 x i8]* @global to i32)) + %arg) U: full-set S: full-set Exits: ((-1 * ptrtoint ([0 x i8]* @global to i32)) + %arg) LoopDispositions: { %bb7: Invariant } +; X64-NEXT: %tmp9 = select i1 %tmp8, i16 0, i16 1 +; X64-NEXT: --> %tmp9 U: [0,2) S: [-2,2) Exits: <> LoopDispositions: { %bb7: Variant } +; X64-NEXT: Determining loop execution counts for: @zext_ptr_to_i32 +; X64-NEXT: Loop %bb7: Unpredictable backedge-taken count. +; X64-NEXT: Loop %bb7: Unpredictable max backedge-taken count. +; X64-NEXT: Loop %bb7: Unpredictable predicated backedge-taken count. ; -; PTR64_IDX32-LABEL: 'zext_ptr_to_i32' -; PTR64_IDX32-NEXT: Classifying expressions for: @zext_ptr_to_i32 -; PTR64_IDX32-NEXT: %tmp = sub i32 %arg, ptrtoint ([0 x i8]* @global to i32) -; PTR64_IDX32-NEXT: --> ((-1 * @global) + %arg) U: full-set S: full-set Exits: ((-1 * @global) + %arg) LoopDispositions: { %bb7: Invariant } -; PTR64_IDX32-NEXT: %tmp9 = select i1 %tmp8, i16 0, i16 1 -; PTR64_IDX32-NEXT: --> %tmp9 U: [0,2) S: [-2,2) Exits: <> LoopDispositions: { %bb7: Variant } -; PTR64_IDX32-NEXT: Determining loop execution counts for: @zext_ptr_to_i32 -; PTR64_IDX32-NEXT: Loop %bb7: Unpredictable backedge-taken count. -; PTR64_IDX32-NEXT: Loop %bb7: Unpredictable max backedge-taken count. -; PTR64_IDX32-NEXT: Loop %bb7: Unpredictable predicated backedge-taken count. -; -; PTR16_IDX16-LABEL: 'zext_ptr_to_i32' -; PTR16_IDX16-NEXT: Classifying expressions for: @zext_ptr_to_i32 -; PTR16_IDX16-NEXT: %tmp = sub i32 %arg, ptrtoint ([0 x i8]* @global to i32) -; PTR16_IDX16-NEXT: --> ((-1 * (zext [0 x i8]* @global to i32)) + %arg) U: full-set S: full-set Exits: ((-1 * (zext [0 x i8]* @global to i32)) + %arg) LoopDispositions: { %bb7: Invariant } -; PTR16_IDX16-NEXT: %tmp9 = select i1 %tmp8, i16 0, i16 1 -; PTR16_IDX16-NEXT: --> %tmp9 U: [0,2) S: [-2,2) Exits: <> LoopDispositions: { %bb7: Variant } -; PTR16_IDX16-NEXT: Determining loop execution counts for: @zext_ptr_to_i32 -; PTR16_IDX16-NEXT: Loop %bb7: Unpredictable backedge-taken count. -; PTR16_IDX16-NEXT: Loop %bb7: Unpredictable max backedge-taken count. -; PTR16_IDX16-NEXT: Loop %bb7: Unpredictable predicated backedge-taken count. -; -; PTR16_IDX32-LABEL: 'zext_ptr_to_i32' -; PTR16_IDX32-NEXT: Classifying expressions for: @zext_ptr_to_i32 -; PTR16_IDX32-NEXT: %tmp = sub i32 %arg, ptrtoint ([0 x i8]* @global to i32) -; PTR16_IDX32-NEXT: --> ((-1 * @global) + %arg) U: full-set S: full-set Exits: ((-1 * @global) + %arg) LoopDispositions: { %bb7: Invariant } -; PTR16_IDX32-NEXT: %tmp9 = select i1 %tmp8, i16 0, i16 1 -; PTR16_IDX32-NEXT: --> %tmp9 U: [0,2) S: [-2,2) Exits: <> LoopDispositions: { %bb7: Variant } -; PTR16_IDX32-NEXT: Determining loop execution counts for: @zext_ptr_to_i32 -; PTR16_IDX32-NEXT: Loop %bb7: Unpredictable backedge-taken count. -; PTR16_IDX32-NEXT: Loop %bb7: Unpredictable max backedge-taken count. -; PTR16_IDX32-NEXT: Loop %bb7: Unpredictable predicated backedge-taken count. 
+; X32-LABEL: 'zext_ptr_to_i32' +; X32-NEXT: Classifying expressions for: @zext_ptr_to_i32 +; X32-NEXT: %tmp = sub i32 %arg, ptrtoint ([0 x i8]* @global to i32) +; X32-NEXT: --> ((-1 * ptrtoint ([0 x i8]* @global to i32)) + %arg) U: full-set S: full-set Exits: ((-1 * ptrtoint ([0 x i8]* @global to i32)) + %arg) LoopDispositions: { %bb7: Invariant } +; X32-NEXT: %tmp9 = select i1 %tmp8, i16 0, i16 1 +; X32-NEXT: --> %tmp9 U: [0,2) S: [-2,2) Exits: <> LoopDispositions: { %bb7: Variant } +; X32-NEXT: Determining loop execution counts for: @zext_ptr_to_i32 +; X32-NEXT: Loop %bb7: Unpredictable backedge-taken count. +; X32-NEXT: Loop %bb7: Unpredictable max backedge-taken count. +; X32-NEXT: Loop %bb7: Unpredictable predicated backedge-taken count. ; bb: br label %bb7 @@ -300,38 +312,16 @@ bb10: ; preds = %bb7 } define void @sext_to_i32(i32 %arg, i32 %arg6) { -; X64-LABEL: 'sext_to_i32' -; X64-NEXT: Classifying expressions for: @sext_to_i32 -; X64-NEXT: %tmp = sub i32 %arg, sext (i16 ptrtoint ([0 x i8]* @global to i16) to i32) -; X64-NEXT: --> ((-1 * (sext i16 (trunc [0 x i8]* @global to i16) to i32)) + %arg) U: full-set S: full-set Exits: ((-1 * (sext i16 (trunc [0 x i8]* @global to i16) to i32)) + %arg) LoopDispositions: { %bb7: Invariant } -; X64-NEXT: %tmp9 = select i1 %tmp8, i16 0, i16 1 -; X64-NEXT: --> %tmp9 U: [0,2) S: [-2,2) Exits: <> LoopDispositions: { %bb7: Variant } -; X64-NEXT: Determining loop execution counts for: @sext_to_i32 -; X64-NEXT: Loop %bb7: Unpredictable backedge-taken count. -; X64-NEXT: Loop %bb7: Unpredictable max backedge-taken count. -; X64-NEXT: Loop %bb7: Unpredictable predicated backedge-taken count. -; -; PTR16_IDX16-LABEL: 'sext_to_i32' -; PTR16_IDX16-NEXT: Classifying expressions for: @sext_to_i32 -; PTR16_IDX16-NEXT: %tmp = sub i32 %arg, sext (i16 ptrtoint ([0 x i8]* @global to i16) to i32) -; PTR16_IDX16-NEXT: --> ((-1 * (sext [0 x i8]* @global to i32)) + %arg) U: full-set S: full-set Exits: ((-1 * (sext [0 x i8]* @global to i32)) + %arg) LoopDispositions: { %bb7: Invariant } -; PTR16_IDX16-NEXT: %tmp9 = select i1 %tmp8, i16 0, i16 1 -; PTR16_IDX16-NEXT: --> %tmp9 U: [0,2) S: [-2,2) Exits: <> LoopDispositions: { %bb7: Variant } -; PTR16_IDX16-NEXT: Determining loop execution counts for: @sext_to_i32 -; PTR16_IDX16-NEXT: Loop %bb7: Unpredictable backedge-taken count. -; PTR16_IDX16-NEXT: Loop %bb7: Unpredictable max backedge-taken count. -; PTR16_IDX16-NEXT: Loop %bb7: Unpredictable predicated backedge-taken count. -; -; PTR16_IDX32-LABEL: 'sext_to_i32' -; PTR16_IDX32-NEXT: Classifying expressions for: @sext_to_i32 -; PTR16_IDX32-NEXT: %tmp = sub i32 %arg, sext (i16 ptrtoint ([0 x i8]* @global to i16) to i32) -; PTR16_IDX32-NEXT: --> ((-1 * (sext i16 (trunc [0 x i8]* @global to i16) to i32)) + %arg) U: full-set S: full-set Exits: ((-1 * (sext i16 (trunc [0 x i8]* @global to i16) to i32)) + %arg) LoopDispositions: { %bb7: Invariant } -; PTR16_IDX32-NEXT: %tmp9 = select i1 %tmp8, i16 0, i16 1 -; PTR16_IDX32-NEXT: --> %tmp9 U: [0,2) S: [-2,2) Exits: <> LoopDispositions: { %bb7: Variant } -; PTR16_IDX32-NEXT: Determining loop execution counts for: @sext_to_i32 -; PTR16_IDX32-NEXT: Loop %bb7: Unpredictable backedge-taken count. -; PTR16_IDX32-NEXT: Loop %bb7: Unpredictable max backedge-taken count. -; PTR16_IDX32-NEXT: Loop %bb7: Unpredictable predicated backedge-taken count. 
+; ALL-LABEL: 'sext_to_i32' +; ALL-NEXT: Classifying expressions for: @sext_to_i32 +; ALL-NEXT: %tmp = sub i32 %arg, sext (i16 ptrtoint ([0 x i8]* @global to i16) to i32) +; ALL-NEXT: --> ((-1 * (sext i16 ptrtoint ([0 x i8]* @global to i16) to i32)) + %arg) U: full-set S: full-set Exits: ((-1 * (sext i16 ptrtoint ([0 x i8]* @global to i16) to i32)) + %arg) LoopDispositions: { %bb7: Invariant } +; ALL-NEXT: %tmp9 = select i1 %tmp8, i16 0, i16 1 +; ALL-NEXT: --> %tmp9 U: [0,2) S: [-2,2) Exits: <> LoopDispositions: { %bb7: Variant } +; ALL-NEXT: Determining loop execution counts for: @sext_to_i32 +; ALL-NEXT: Loop %bb7: Unpredictable backedge-taken count. +; ALL-NEXT: Loop %bb7: Unpredictable max backedge-taken count. +; ALL-NEXT: Loop %bb7: Unpredictable predicated backedge-taken count. ; bb: br label %bb7 @@ -346,3 +336,55 @@ bb7: ; preds = %bb7, %bb bb10: ; preds = %bb7 ret void } + +define i64 @sext_like_noop(i32 %n) { +; X64-LABEL: 'sext_like_noop' +; X64-NEXT: Classifying expressions for: @sext_like_noop +; X64-NEXT: %ii = sext i32 %i to i64 +; X64-NEXT: --> (sext i32 {1,+,1}<%for.body> to i64) U: [-2147483648,2147483648) S: [-2147483648,2147483648) --> (sext i32 (-1 + ptrtoint (i64 (i32)* @sext_like_noop to i32)) to i64) U: [-2147483648,2147483648) S: [-2147483648,2147483648) +; X64-NEXT: %div = sdiv i64 55555, %ii +; X64-NEXT: --> %div U: full-set S: full-set --> sdiv (i64 55555, i64 sext (i32 add (i32 ptrtoint (i64 (i32)* @sext_like_noop to i32), i32 -1) to i64)) U: full-set S: full-set +; X64-NEXT: %i = phi i32 [ %inc, %for.body ], [ 1, %entry ] +; X64-NEXT: --> {1,+,1}<%for.body> U: [1,0) S: [1,0) Exits: (-1 + ptrtoint (i64 (i32)* @sext_like_noop to i32)) LoopDispositions: { %for.body: Computable } +; X64-NEXT: %inc = add nuw i32 %i, 1 +; X64-NEXT: --> {2,+,1}<%for.body> U: [2,0) S: [2,0) Exits: ptrtoint (i64 (i32)* @sext_like_noop to i32) LoopDispositions: { %for.body: Computable } +; X64-NEXT: Determining loop execution counts for: @sext_like_noop +; X64-NEXT: Loop %for.body: backedge-taken count is (-2 + ptrtoint (i64 (i32)* @sext_like_noop to i32)) +; X64-NEXT: Loop %for.body: max backedge-taken count is -1 +; X64-NEXT: Loop %for.body: Predicated backedge-taken count is (-2 + ptrtoint (i64 (i32)* @sext_like_noop to i32)) +; X64-NEXT: Predicates: +; X64: Loop %for.body: Trip multiple is 1 +; +; X32-LABEL: 'sext_like_noop' +; X32-NEXT: Classifying expressions for: @sext_like_noop +; X32-NEXT: %ii = sext i32 %i to i64 +; X32-NEXT: --> (sext i32 {1,+,1}<%for.body> to i64) U: [-2147483648,2147483648) S: [-2147483648,2147483648) --> (sext i32 (-1 + ptrtoint (i64 (i32)* @sext_like_noop to i32)) to i64) U: [-1,65535) S: [-65537,65535) +; X32-NEXT: %div = sdiv i64 55555, %ii +; X32-NEXT: --> %div U: full-set S: full-set --> sdiv (i64 55555, i64 sext (i32 add (i32 ptrtoint (i64 (i32)* @sext_like_noop to i32), i32 -1) to i64)) U: full-set S: full-set +; X32-NEXT: %i = phi i32 [ %inc, %for.body ], [ 1, %entry ] +; X32-NEXT: --> {1,+,1}<%for.body> U: [1,0) S: [1,0) Exits: (-1 + ptrtoint (i64 (i32)* @sext_like_noop to i32)) LoopDispositions: { %for.body: Computable } +; X32-NEXT: %inc = add nuw i32 %i, 1 +; X32-NEXT: --> {2,+,1}<%for.body> U: [2,0) S: [2,0) Exits: ptrtoint (i64 (i32)* @sext_like_noop to i32) LoopDispositions: { %for.body: Computable } +; X32-NEXT: Determining loop execution counts for: @sext_like_noop +; X32-NEXT: Loop %for.body: backedge-taken count is (-2 + ptrtoint (i64 (i32)* @sext_like_noop to i32)) +; X32-NEXT: Loop %for.body: max backedge-taken count is -1 +; 
X32-NEXT: Loop %for.body: Predicated backedge-taken count is (-2 + ptrtoint (i64 (i32)* @sext_like_noop to i32)) +; X32-NEXT: Predicates: +; X32: Loop %for.body: Trip multiple is 1 +; +entry: + %cmp6 = icmp sgt i32 %n, 1 + br label %for.body + +for.cond.cleanup: + %ii = sext i32 %i to i64 + %div = sdiv i64 55555, %ii + ret i64 %div + +for.body: + %i = phi i32 [ %inc, %for.body ], [ 1, %entry ] + %inc = add nuw i32 %i, 1 + %exitcond = icmp eq i32 %inc, ptrtoint (i64 (i32)* @sext_like_noop to i32) + br i1 %exitcond, label %for.cond.cleanup, label %for.body +} +declare void @f(i64) diff --git a/llvm/test/Analysis/ScalarEvolution/ptrtoint.ll b/llvm/test/Analysis/ScalarEvolution/ptrtoint.ll index ac08fb24775e..e3e9330e241f 100644 --- a/llvm/test/Analysis/ScalarEvolution/ptrtoint.ll +++ b/llvm/test/Analysis/ScalarEvolution/ptrtoint.ll @@ -16,25 +16,25 @@ define void @ptrtoint(i8* %in, i64* %out0, i32* %out1, i16* %out2, i128* %out3) ; X64-LABEL: 'ptrtoint' ; X64-NEXT: Classifying expressions for: @ptrtoint ; X64-NEXT: %p0 = ptrtoint i8* %in to i64 -; X64-NEXT: --> %in U: full-set S: full-set +; X64-NEXT: --> %p0 U: full-set S: full-set ; X64-NEXT: %p1 = ptrtoint i8* %in to i32 -; X64-NEXT: --> (trunc i8* %in to i32) U: full-set S: full-set +; X64-NEXT: --> %p1 U: full-set S: full-set ; X64-NEXT: %p2 = ptrtoint i8* %in to i16 -; X64-NEXT: --> (trunc i8* %in to i16) U: full-set S: full-set +; X64-NEXT: --> %p2 U: full-set S: full-set ; X64-NEXT: %p3 = ptrtoint i8* %in to i128 -; X64-NEXT: --> (zext i8* %in to i128) U: [0,18446744073709551616) S: [0,18446744073709551616) +; X64-NEXT: --> %p3 U: [0,18446744073709551616) S: [-18446744073709551616,18446744073709551616) ; X64-NEXT: Determining loop execution counts for: @ptrtoint ; ; X32-LABEL: 'ptrtoint' ; X32-NEXT: Classifying expressions for: @ptrtoint ; X32-NEXT: %p0 = ptrtoint i8* %in to i64 -; X32-NEXT: --> (zext i8* %in to i64) U: [0,4294967296) S: [0,4294967296) +; X32-NEXT: --> %p0 U: [0,4294967296) S: [-4294967296,4294967296) ; X32-NEXT: %p1 = ptrtoint i8* %in to i32 -; X32-NEXT: --> %in U: full-set S: full-set +; X32-NEXT: --> %p1 U: full-set S: full-set ; X32-NEXT: %p2 = ptrtoint i8* %in to i16 -; X32-NEXT: --> (trunc i8* %in to i16) U: full-set S: full-set +; X32-NEXT: --> %p2 U: full-set S: full-set ; X32-NEXT: %p3 = ptrtoint i8* %in to i128 -; X32-NEXT: --> (zext i8* %in to i128) U: [0,4294967296) S: [0,4294967296) +; X32-NEXT: --> %p3 U: [0,4294967296) S: [-4294967296,4294967296) ; X32-NEXT: Determining loop execution counts for: @ptrtoint ; %p0 = ptrtoint i8* %in to i64 @@ -53,25 +53,25 @@ define void @ptrtoint_as1(i8 addrspace(1)* %in, i64* %out0, i32* %out1, i16* %ou ; X64-LABEL: 'ptrtoint_as1' ; X64-NEXT: Classifying expressions for: @ptrtoint_as1 ; X64-NEXT: %p0 = ptrtoint i8 addrspace(1)* %in to i64 -; X64-NEXT: --> %in U: full-set S: full-set +; X64-NEXT: --> %p0 U: full-set S: full-set ; X64-NEXT: %p1 = ptrtoint i8 addrspace(1)* %in to i32 -; X64-NEXT: --> (trunc i8 addrspace(1)* %in to i32) U: full-set S: full-set +; X64-NEXT: --> %p1 U: full-set S: full-set ; X64-NEXT: %p2 = ptrtoint i8 addrspace(1)* %in to i16 -; X64-NEXT: --> (trunc i8 addrspace(1)* %in to i16) U: full-set S: full-set +; X64-NEXT: --> %p2 U: full-set S: full-set ; X64-NEXT: %p3 = ptrtoint i8 addrspace(1)* %in to i128 -; X64-NEXT: --> (zext i8 addrspace(1)* %in to i128) U: [0,18446744073709551616) S: [0,18446744073709551616) +; X64-NEXT: --> %p3 U: [0,18446744073709551616) S: [-18446744073709551616,18446744073709551616) ; X64-NEXT: Determining loop execution 
counts for: @ptrtoint_as1 ; ; X32-LABEL: 'ptrtoint_as1' ; X32-NEXT: Classifying expressions for: @ptrtoint_as1 ; X32-NEXT: %p0 = ptrtoint i8 addrspace(1)* %in to i64 -; X32-NEXT: --> (zext i8 addrspace(1)* %in to i64) U: [0,4294967296) S: [0,4294967296) +; X32-NEXT: --> %p0 U: [0,4294967296) S: [-4294967296,4294967296) ; X32-NEXT: %p1 = ptrtoint i8 addrspace(1)* %in to i32 -; X32-NEXT: --> %in U: full-set S: full-set +; X32-NEXT: --> %p1 U: full-set S: full-set ; X32-NEXT: %p2 = ptrtoint i8 addrspace(1)* %in to i16 -; X32-NEXT: --> (trunc i8 addrspace(1)* %in to i16) U: full-set S: full-set +; X32-NEXT: --> %p2 U: full-set S: full-set ; X32-NEXT: %p3 = ptrtoint i8 addrspace(1)* %in to i128 -; X32-NEXT: --> (zext i8 addrspace(1)* %in to i128) U: [0,4294967296) S: [0,4294967296) +; X32-NEXT: --> %p3 U: [0,4294967296) S: [-4294967296,4294967296) ; X32-NEXT: Determining loop execution counts for: @ptrtoint_as1 ; %p0 = ptrtoint i8 addrspace(1)* %in to i64 @@ -92,7 +92,7 @@ define void @ptrtoint_of_bitcast(i8* %in, i64* %out0) { ; X64-NEXT: %in_casted = bitcast i8* %in to float* ; X64-NEXT: --> %in U: full-set S: full-set ; X64-NEXT: %p0 = ptrtoint float* %in_casted to i64 -; X64-NEXT: --> %in U: full-set S: full-set +; X64-NEXT: --> %p0 U: full-set S: full-set ; X64-NEXT: Determining loop execution counts for: @ptrtoint_of_bitcast ; ; X32-LABEL: 'ptrtoint_of_bitcast' @@ -100,7 +100,7 @@ define void @ptrtoint_of_bitcast(i8* %in, i64* %out0) { ; X32-NEXT: %in_casted = bitcast i8* %in to float* ; X32-NEXT: --> %in U: full-set S: full-set ; X32-NEXT: %p0 = ptrtoint float* %in_casted to i64 -; X32-NEXT: --> (zext i8* %in to i64) U: [0,4294967296) S: [0,4294967296) +; X32-NEXT: --> %p0 U: [0,4294967296) S: [-4294967296,4294967296) ; X32-NEXT: Determining loop execution counts for: @ptrtoint_of_bitcast ; %in_casted = bitcast i8* %in to float* @@ -116,7 +116,7 @@ define void @ptrtoint_of_addrspacecast(i8* %in, i64* %out0) { ; X64-NEXT: %in_casted = addrspacecast i8* %in to i8 addrspace(1)* ; X64-NEXT: --> %in_casted U: full-set S: full-set ; X64-NEXT: %p0 = ptrtoint i8 addrspace(1)* %in_casted to i64 -; X64-NEXT: --> %in_casted U: full-set S: full-set +; X64-NEXT: --> %p0 U: full-set S: full-set ; X64-NEXT: Determining loop execution counts for: @ptrtoint_of_addrspacecast ; ; X32-LABEL: 'ptrtoint_of_addrspacecast' @@ -124,7 +124,7 @@ define void @ptrtoint_of_addrspacecast(i8* %in, i64* %out0) { ; X32-NEXT: %in_casted = addrspacecast i8* %in to i8 addrspace(1)* ; X32-NEXT: --> %in_casted U: full-set S: full-set ; X32-NEXT: %p0 = ptrtoint i8 addrspace(1)* %in_casted to i64 -; X32-NEXT: --> (zext i8 addrspace(1)* %in_casted to i64) U: [0,4294967296) S: [0,4294967296) +; X32-NEXT: --> %p0 U: [0,4294967296) S: [-4294967296,4294967296) ; X32-NEXT: Determining loop execution counts for: @ptrtoint_of_addrspacecast ; %in_casted = addrspacecast i8* %in to i8 addrspace(1)* @@ -140,7 +140,7 @@ define void @ptrtoint_of_inttoptr(i64 %in, i64* %out0) { ; X64-NEXT: %in_casted = inttoptr i64 %in to i8* ; X64-NEXT: --> %in_casted U: full-set S: full-set ; X64-NEXT: %p0 = ptrtoint i8* %in_casted to i64 -; X64-NEXT: --> %in_casted U: full-set S: full-set +; X64-NEXT: --> %p0 U: full-set S: full-set ; X64-NEXT: Determining loop execution counts for: @ptrtoint_of_inttoptr ; ; X32-LABEL: 'ptrtoint_of_inttoptr' @@ -148,7 +148,7 @@ define void @ptrtoint_of_inttoptr(i64 %in, i64* %out0) { ; X32-NEXT: %in_casted = inttoptr i64 %in to i8* ; X32-NEXT: --> %in_casted U: full-set S: full-set ; X32-NEXT: %p0 = ptrtoint i8* %in_casted 
to i64 -; X32-NEXT: --> (zext i8* %in_casted to i64) U: [0,4294967296) S: [0,4294967296) +; X32-NEXT: --> %p0 U: [0,4294967296) S: [-4294967296,4294967296) ; X32-NEXT: Determining loop execution counts for: @ptrtoint_of_inttoptr ; %in_casted = inttoptr i64 %in to i8* @@ -197,17 +197,11 @@ define void @ptrtoint_of_nullptr(i64* %out0) { ; A constant inttoptr argument of an ptrtoint is still bad. define void @ptrtoint_of_constantexpr_inttoptr(i64* %out0) { -; X64-LABEL: 'ptrtoint_of_constantexpr_inttoptr' -; X64-NEXT: Classifying expressions for: @ptrtoint_of_constantexpr_inttoptr -; X64-NEXT: %p0 = ptrtoint i8* inttoptr (i64 42 to i8*) to i64 -; X64-NEXT: --> inttoptr (i64 42 to i8*) U: [42,43) S: [-64,64) -; X64-NEXT: Determining loop execution counts for: @ptrtoint_of_constantexpr_inttoptr -; -; X32-LABEL: 'ptrtoint_of_constantexpr_inttoptr' -; X32-NEXT: Classifying expressions for: @ptrtoint_of_constantexpr_inttoptr -; X32-NEXT: %p0 = ptrtoint i8* inttoptr (i64 42 to i8*) to i64 -; X32-NEXT: --> (zext i8* inttoptr (i64 42 to i8*) to i64) U: [42,43) S: [0,4294967296) -; X32-NEXT: Determining loop execution counts for: @ptrtoint_of_constantexpr_inttoptr +; ALL-LABEL: 'ptrtoint_of_constantexpr_inttoptr' +; ALL-NEXT: Classifying expressions for: @ptrtoint_of_constantexpr_inttoptr +; ALL-NEXT: %p0 = ptrtoint i8* inttoptr (i64 42 to i8*) to i64 +; ALL-NEXT: --> %p0 U: [42,43) S: [-64,64) +; ALL-NEXT: Determining loop execution counts for: @ptrtoint_of_constantexpr_inttoptr ; %p0 = ptrtoint i8* inttoptr (i64 42 to i8*) to i64 store i64 %p0, i64* %out0 diff --git a/llvm/test/CodeGen/ARM/lsr-undef-in-binop.ll b/llvm/test/CodeGen/ARM/lsr-undef-in-binop.ll index e73397214475..564328d99998 100644 --- a/llvm/test/CodeGen/ARM/lsr-undef-in-binop.ll +++ b/llvm/test/CodeGen/ARM/lsr-undef-in-binop.ll @@ -186,9 +186,7 @@ define linkonce_odr i32 @vector_insert(%"class.std::__1::vector.182"*, [1 x i32] br i1 %114, label %124, label %115 ; CHECK-LABEL: .preheader: -; CHECK-NEXT: [[NEG_NEW:%[0-9]+]] = sub i32 0, [[NEW_CAST]] -; CHECK-NEXT: getelementptr i8, i8* %97, i32 [[NEG_NEW]] - +; CHECK-NEXT: sub i32 [[OLD_CAST]], [[NEW_CAST]] ;
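For readers tracking the functional change behind the check-line churn above: with this patch, a ptrtoint instruction is modeled as an opaque SCEVUnknown of the cast itself, not as a zext/trunc/no-op of its pointer operand, and ptrtoint constant expressions likewise survive into the printed SCEV instead of being rewritten to the bare global. A minimal sketch of the observable difference, assuming the usual `opt -analyze -scalar-evolution` invocation these tests use (the RUN lines are not part of this hunk; the function and value names below are illustrative only, not taken from the test files):

; On a 64-bit-pointer target, SCEV now classifies %p as itself (an unknown
; value); before this patch it printed (trunc i8* %in to i32). This mirrors
; the updated X64 check lines for %p1 in ptrtoint.ll above.
define i32 @ptrtoint_example(i8* %in) {
  %p = ptrtoint i8* %in to i32
  ret i32 %p
}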