[Codegen] Emit both AssumeAlignedAttr and AllocAlignAttr assumptions if they exist
Summary:
We shouldn't just give up when we find one of them (as we currently do with `AssumeAlignedAttr`); we should emit them all. As the tests show, even if we materialized good knowledge from `__attribute__((assume_aligned(32)))`, that doesn't mean the `__attribute__((alloc_align([...])))` info won't also be useful. It might be, but that isn't a given.

Reviewers: erichkeane, jdoerfert, aaron.ballman

Reviewed By: erichkeane

Subscribers: cfe-commits

Tags: #clang

Differential Revision: https://reviews.llvm.org/D72979
This commit is contained in: commit 372cb38f45 (parent cf263807a6)
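For orientation, here is a minimal sketch of the situation the patch addresses. The declaration mirrors the new test below; the caller name is illustrative:

// Sketch: a single declaration carrying both alignment attributes.
// assume_aligned(32): the returned pointer is always 32-byte aligned.
// alloc_align(2):     the returned pointer is aligned to the value of
//                     the second argument.
void *my_aligned_alloc(int size, int alignment)
    __attribute__((assume_aligned(32), alloc_align(2)));

void *caller(void) {
  // Before this patch, only the assume_aligned(32) assumption was emitted
  // for this call; the alloc_align-derived 16-byte assumption was dropped.
  // After this patch, both llvm.assume calls are emitted.
  return my_aligned_alloc(320, 16);
}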
@@ -4628,7 +4628,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
       llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
       EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
                               AlignmentCI, OffsetValue);
-    } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
+    }
+    if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
       llvm::Value *AlignmentVal = CallArgs[AA->getParamIndex().getLLVMIndex()]
                                       .getRValue(*this)
                                       .getScalarVal();
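For reference, the IR pattern that EmitAlignmentAssumption produces (visible in the CHECK lines of the new test below) corresponds to the following check, shown here as a hedged C sketch; the helper name is illustrative, not a Clang API:

#include <stdint.h>

// Illustrative C equivalent of one emitted alignment assumption:
// ptrtoint -> (sub, for a dynamic alignment) -> and -> icmp eq 0 -> llvm.assume.
static inline void assume_aligned_to(void *ptr, uint64_t alignment) {
  uint64_t ptrint = (uint64_t)(uintptr_t)ptr; // ptrtoint
  uint64_t mask = alignment - 1;              // sub (constant-folded when the
                                              // alignment is an immediate)
  uint64_t maskedptr = ptrint & mask;         // and
  __builtin_assume(maskedptr == 0);           // icmp eq + llvm.assume
}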
@@ -0,0 +1,77 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s

void *my_aligned_alloc(int size, int alignment) __attribute__((assume_aligned(32), alloc_align(2)));

// CHECK-LABEL: @t0_immediate0(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CALL:%.*]] = call i8* @my_aligned_alloc(i32 320, i32 16)
// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64
// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[PTRINT1:%.*]] = ptrtoint i8* [[CALL]] to i64
// CHECK-NEXT: [[MASKEDPTR2:%.*]] = and i64 [[PTRINT1]], 15
// CHECK-NEXT: [[MASKCOND3:%.*]] = icmp eq i64 [[MASKEDPTR2]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND3]])
// CHECK-NEXT: ret i8* [[CALL]]
//
void *t0_immediate0() {
  return my_aligned_alloc(320, 16);
};

// CHECK-LABEL: @t1_immediate1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CALL:%.*]] = call i8* @my_aligned_alloc(i32 320, i32 32)
// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64
// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[PTRINT1:%.*]] = ptrtoint i8* [[CALL]] to i64
// CHECK-NEXT: [[MASKEDPTR2:%.*]] = and i64 [[PTRINT1]], 31
// CHECK-NEXT: [[MASKCOND3:%.*]] = icmp eq i64 [[MASKEDPTR2]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND3]])
// CHECK-NEXT: ret i8* [[CALL]]
//
void *t1_immediate1() {
  return my_aligned_alloc(320, 32);
};

// CHECK-LABEL: @t2_immediate2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CALL:%.*]] = call i8* @my_aligned_alloc(i32 320, i32 64)
// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64
// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[PTRINT1:%.*]] = ptrtoint i8* [[CALL]] to i64
// CHECK-NEXT: [[MASKEDPTR2:%.*]] = and i64 [[PTRINT1]], 63
// CHECK-NEXT: [[MASKCOND3:%.*]] = icmp eq i64 [[MASKEDPTR2]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND3]])
// CHECK-NEXT: ret i8* [[CALL]]
//
void *t2_immediate2() {
  return my_aligned_alloc(320, 64);
};

// CHECK-LABEL: @t3_variable(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ALIGNMENT_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32 [[ALIGNMENT:%.*]], i32* [[ALIGNMENT_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ALIGNMENT_ADDR]], align 4
// CHECK-NEXT: [[CALL:%.*]] = call i8* @my_aligned_alloc(i32 320, i32 [[TMP0]])
// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64
// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[TMP0]] to i64
// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
// CHECK-NEXT: [[PTRINT1:%.*]] = ptrtoint i8* [[CALL]] to i64
// CHECK-NEXT: [[MASKEDPTR2:%.*]] = and i64 [[PTRINT1]], [[MASK]]
// CHECK-NEXT: [[MASKCOND3:%.*]] = icmp eq i64 [[MASKEDPTR2]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND3]])
// CHECK-NEXT: ret i8* [[CALL]]
//
void *t3_variable(int alignment) {
  return my_aligned_alloc(320, alignment);
};
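A brief note on why neither attribute subsumes the other, as the tests above demonstrate: in t0 the static assume_aligned(32) fact is stronger than the 16-byte alloc_align fact, while in t2 the 64-byte alloc_align fact is the stronger one. The following hedged sketch (function body and names are illustrative) shows code that benefits from having both assumptions present:

#include <stdint.h>

void *my_aligned_alloc(int size, int alignment)
    __attribute__((assume_aligned(32), alloc_align(2)));

// With both assumptions emitted, an optimizer can in principle fold each
// of these checks to 1: the first via assume_aligned(32), the second via
// the alloc_align-derived 64-byte assumption.
int both_facts_used(void) {
  void *p = my_aligned_alloc(320, 64);
  int aligned32 = ((uintptr_t)p & 31) == 0; // provable from assume_aligned(32)
  int aligned64 = ((uintptr_t)p & 63) == 0; // provable only from alloc_align
  return aligned32 & aligned64;
}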