diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 7c1255578a20..59b2953c3b91 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -815,11 +815,17 @@ static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
   // Because array indices greater than the number of elements are valid in
   // GEPs, unless we know the intermediate indices are identical between
   // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
-  // partially overlap.
+  // partially overlap. We also need to check that the loaded size matches
+  // the element size, otherwise we could still have overlap.
+  const uint64_t ElementSize =
+      DL.getTypeStoreSize(cast<SequentialType>(Ty)->getElementType());
+  if (V1Size != ElementSize || V2Size != ElementSize)
+    return MayAlias;
+
   for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
     if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
       return MayAlias;
-
+
   // Now we know that the array/pointer that GEP1 indexes into and that
   // that GEP2 indexes into must either precisely overlap or be disjoint.
   // Because they cannot partially overlap and because fields in an array
diff --git a/llvm/test/Analysis/BasicAA/sequential-gep.ll b/llvm/test/Analysis/BasicAA/sequential-gep.ll
index f59843742f4b..c17a782aa04b 100644
--- a/llvm/test/Analysis/BasicAA/sequential-gep.ll
+++ b/llvm/test/Analysis/BasicAA/sequential-gep.ll
@@ -40,4 +40,15 @@ define void @t4([8 x i32]* %p, i32 %addend, i32* %q) {
   ret void
 }
 
+; CHECK: Function: t5
+; CHECK: PartialAlias: i32* %gep2, i64* %bc
+define void @t5([8 x i32]* %p, i32 %addend, i32* %q) {
+  %knownnonzero = load i32, i32* %q, !range !0
+  %add = add nsw nuw i32 %addend, %knownnonzero
+  %gep1 = getelementptr [8 x i32], [8 x i32]* %p, i32 2, i32 %addend
+  %gep2 = getelementptr [8 x i32], [8 x i32]* %p, i32 2, i32 %add
+  %bc = bitcast i32* %gep1 to i64*
+  ret void
+}
+
 !0 = !{ i32 1, i32 5 }