forked from OSchip/llvm-project
[instcombine] Infer alignment for aligned_alloc with potentially zero size
This change removes a previous restriction where we had to prove the allocation performed by aligned_alloc was non-zero in size before using the align parameter to annotate the result. I believe this was conservatism around the C11 specification of this routine, which allowed UB when size was not a multiple of alignment — but if so, it was a partial restriction at best (ex: align 32, size 16 was equally UB, but not restricted). The spec has since been clarified to require a nullptr return, not UB. A nullptr — the documented return for this function on failure in all cases, once the UB mentioned above was removed — is trivially aligned for any power of two. This isn't totally new behavior even for this transform: we'd previously annotate potentially failing allocs (e.g. huge sizes), meaning we were putting align on potentially null pointers anyway. This change simply does the same for all failure modes.
This commit is contained in:
parent
4b5d59ffd0
commit
f4c54683d6
|
@ -2594,8 +2594,7 @@ void InstCombinerImpl::annotateAnyAllocSite(CallBase &Call, const TargetLibraryI
|
|||
return;
|
||||
|
||||
ConstantInt *Op0C = dyn_cast<ConstantInt>(Call.getOperand(0));
|
||||
if (Op0C && Op0C->getValue().ult(llvm::Value::MaximumAlignment) &&
|
||||
isKnownNonZero(Call.getOperand(1), DL, 0, &AC, &Call, &DT)) {
|
||||
if (Op0C && Op0C->getValue().ult(llvm::Value::MaximumAlignment)) {
|
||||
uint64_t AlignmentVal = Op0C->getZExtValue();
|
||||
if (llvm::isPowerOf2_64(AlignmentVal)) {
|
||||
Call.removeRetAttr(Attribute::Alignment);
|
||||
|
|
|
@ -55,7 +55,7 @@ define noalias i8* @aligned_alloc_unknown_size_nonzero(i1 %c) {
|
|||
define noalias i8* @aligned_alloc_unknown_size_possibly_zero(i1 %c) {
|
||||
; CHECK-LABEL: @aligned_alloc_unknown_size_possibly_zero(
|
||||
; CHECK-NEXT: [[SIZE:%.*]] = select i1 [[C:%.*]], i64 64, i64 0
|
||||
; CHECK-NEXT: [[CALL:%.*]] = tail call noalias i8* @aligned_alloc(i64 32, i64 [[SIZE]])
|
||||
; CHECK-NEXT: [[CALL:%.*]] = tail call noalias align 32 i8* @aligned_alloc(i64 32, i64 [[SIZE]])
|
||||
; CHECK-NEXT: ret i8* [[CALL]]
|
||||
;
|
||||
%size = select i1 %c, i64 64, i64 0
|
||||
|
@ -78,7 +78,7 @@ define noalias i8* @aligned_alloc_dynamic_args(i64 %align, i64 %size) {
|
|||
; CHECK-LABEL: @aligned_alloc_dynamic_args(
|
||||
; CHECK-NEXT: [[CALL:%.*]] = tail call noalias dereferenceable_or_null(1024) i8* @aligned_alloc(i64 [[ALIGN:%.*]], i64 1024)
|
||||
; CHECK-NEXT: [[CALL_1:%.*]] = tail call noalias dereferenceable_or_null(1024) i8* @aligned_alloc(i64 0, i64 1024)
|
||||
; CHECK-NEXT: [[CALL_2:%.*]] = tail call noalias i8* @aligned_alloc(i64 32, i64 [[SIZE:%.*]])
|
||||
; CHECK-NEXT: [[CALL_2:%.*]] = tail call noalias align 32 i8* @aligned_alloc(i64 32, i64 [[SIZE:%.*]])
|
||||
; CHECK-NEXT: [[TMP1:%.*]] = call i8* @foo(i8* [[CALL]], i8* [[CALL_1]], i8* [[CALL_2]])
|
||||
; CHECK-NEXT: ret i8* [[CALL]]
|
||||
;
|
||||
|
@ -111,10 +111,10 @@ define noalias i8* @memalign_unknown_size_nonzero(i1 %c) {
|
|||
}
|
||||
|
||||
define noalias i8* @memalign_unknown_size_possibly_zero(i1 %c) {
|
||||
; CHECK-LABEL: @memalign_unknown_size_possibly_zero(
|
||||
; CHECK-NEXT: [[SIZE:%.*]] = select i1 [[C:%.*]], i64 64, i64 0
|
||||
; CHECK-NEXT: [[CALL:%.*]] = tail call noalias i8* @memalign(i64 32, i64 [[SIZE]])
|
||||
; CHECK-NEXT: ret i8* [[CALL]]
|
||||
; GNU-LABEL: @memalign_unknown_size_possibly_zero(
|
||||
; GNU-NEXT: [[SIZE:%.*]] = select i1 [[C:%.*]], i64 64, i64 0
|
||||
; GNU-NEXT: [[CALL:%.*]] = tail call noalias align 32 i8* @memalign(i64 32, i64 [[SIZE]])
|
||||
; GNU-NEXT: ret i8* [[CALL]]
|
||||
;
|
||||
%size = select i1 %c, i64 64, i64 0
|
||||
%call = tail call noalias i8* @memalign(i64 32, i64 %size)
|
||||
|
|
Loading…
Reference in New Issue