2020-06-19 21:45:27 +08:00
|
|
|
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
|
2014-09-08 06:58:14 +08:00
|
|
|
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s
|
|
|
|
|
2022-01-16 17:53:11 +08:00
|
|
|
// CHECK-LABEL: @test1(
|
2020-06-19 21:45:27 +08:00
|
|
|
// CHECK-NEXT: entry:
|
|
|
|
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
|
2022-01-16 17:53:11 +08:00
|
|
|
// CHECK-NEXT: store i32* [[A:%.*]], i32** [[A_ADDR]], align 8
|
2020-06-19 21:45:27 +08:00
|
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
|
|
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
|
2020-09-12 19:36:45 +08:00
|
|
|
// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32, i64 0) ]
|
2020-06-19 21:45:27 +08:00
|
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
|
|
|
|
// CHECK-NEXT: store i32* [[TMP2]], i32** [[A_ADDR]], align 8
|
|
|
|
// CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
|
|
|
|
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP3]], i64 0
|
|
|
|
// CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
|
|
|
|
// CHECK-NEXT: ret i32 [[TMP4]]
|
|
|
|
//
|
2014-09-08 06:58:14 +08:00
|
|
|
// Three-argument form with an explicit zero offset (0ull): per the CHECK lines
// above, the offset is still materialized as a third "align" bundle operand
// (i64 0) on the llvm.assume call.
int test1(int *a) {
  a = __builtin_assume_aligned(a, 32, 0ull);
  return a[0];
}
|
|
|
|
|
2022-01-16 17:53:11 +08:00
|
|
|
// CHECK-LABEL: @test2(
|
2020-06-19 21:45:27 +08:00
|
|
|
// CHECK-NEXT: entry:
|
|
|
|
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
|
2022-01-16 17:53:11 +08:00
|
|
|
// CHECK-NEXT: store i32* [[A:%.*]], i32** [[A_ADDR]], align 8
|
2020-06-19 21:45:27 +08:00
|
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
|
|
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
|
2020-09-12 19:36:45 +08:00
|
|
|
// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32, i64 0) ]
|
2020-06-19 21:45:27 +08:00
|
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
|
|
|
|
// CHECK-NEXT: store i32* [[TMP2]], i32** [[A_ADDR]], align 8
|
|
|
|
// CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
|
|
|
|
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP3]], i64 0
|
|
|
|
// CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
|
|
|
|
// CHECK-NEXT: ret i32 [[TMP4]]
|
|
|
|
//
|
2014-09-08 06:58:14 +08:00
|
|
|
// Same as test1 but the zero offset is a plain int (0); the CHECK lines above
// show identical IR — the offset is converted to i64 0 in the "align" bundle.
int test2(int *a) {
  a = __builtin_assume_aligned(a, 32, 0);
  return a[0];
}
|
|
|
|
|
2022-01-16 17:53:11 +08:00
|
|
|
// CHECK-LABEL: @test3(
|
2020-06-19 21:45:27 +08:00
|
|
|
// CHECK-NEXT: entry:
|
|
|
|
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
|
2022-01-16 17:53:11 +08:00
|
|
|
// CHECK-NEXT: store i32* [[A:%.*]], i32** [[A_ADDR]], align 8
|
2020-06-19 21:45:27 +08:00
|
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
|
|
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
|
2020-09-12 19:36:45 +08:00
|
|
|
// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32) ]
|
2020-06-19 21:45:27 +08:00
|
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
|
|
|
|
// CHECK-NEXT: store i32* [[TMP2]], i32** [[A_ADDR]], align 8
|
|
|
|
// CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
|
|
|
|
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP3]], i64 0
|
|
|
|
// CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
|
|
|
|
// CHECK-NEXT: ret i32 [[TMP4]]
|
|
|
|
//
|
2014-09-08 06:58:14 +08:00
|
|
|
// Two-argument form (no offset): per the CHECK lines above, the "align"
// operand bundle carries only the pointer and the i64 32 alignment — no
// offset operand is emitted.
int test3(int *a) {
  a = __builtin_assume_aligned(a, 32);
  return a[0];
}
|
|
|
|
|
2022-01-16 17:53:11 +08:00
|
|
|
// CHECK-LABEL: @test4(
|
2020-06-19 21:45:27 +08:00
|
|
|
// CHECK-NEXT: entry:
|
|
|
|
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
|
|
|
|
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
|
2022-01-16 17:53:11 +08:00
|
|
|
// CHECK-NEXT: store i32* [[A:%.*]], i32** [[A_ADDR]], align 8
|
|
|
|
// CHECK-NEXT: store i32 [[B:%.*]], i32* [[B_ADDR]], align 4
|
2020-06-19 21:45:27 +08:00
|
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
|
|
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
|
|
|
|
// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[B_ADDR]], align 4
|
|
|
|
// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP2]] to i64
|
2020-09-12 19:36:45 +08:00
|
|
|
// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32, i64 [[CONV]]) ]
|
2020-06-19 21:45:27 +08:00
|
|
|
// CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP1]] to i32*
|
|
|
|
// CHECK-NEXT: store i32* [[TMP3]], i32** [[A_ADDR]], align 8
|
|
|
|
// CHECK-NEXT: [[TMP4:%.*]] = load i32*, i32** [[A_ADDR]], align 8
|
|
|
|
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 0
|
|
|
|
// CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
|
|
|
|
// CHECK-NEXT: ret i32 [[TMP5]]
|
|
|
|
//
|
2014-09-08 06:58:14 +08:00
|
|
|
// Non-constant offset: the CHECK lines above show the int argument 'b' is
// loaded, sign-extended to i64 ([[CONV]]), and passed as the offset operand
// of the "align" bundle.
int test4(int *a, int b) {
  a = __builtin_assume_aligned(a, 32, b);
  return a[0];
}
|
|
|
|
|
2022-02-15 22:27:12 +08:00
|
|
|
int *m1(void) __attribute__((assume_aligned(64)));
|
2014-09-26 13:04:30 +08:00
|
|
|
|
2022-01-16 17:53:11 +08:00
|
|
|
// CHECK-LABEL: @test5(
|
[Codegen] If reasonable, materialize clang's `AssumeAlignedAttr` as llvm's Alignment Attribute on call-site function return value
Summary:
This should be mostly NFC - we still lower the same alignment
knowledge to the IR. The main reasoning here is that
this somewhat improves readability of IR like this,
and will improve test coverage in upcoming patch.
Even though the alignment is guaranteed to always be an I-C-E,
we don't always materialize it as llvm's Alignment Attribute because:
1. There may be a non-zero offset
2. We may be sanitizing for alignment
Note that if there already was an IR alignment attribute
on return value, we union them, and thus the alignment
only ever rises.
Also, there is a second relevant clang attribute `AllocAlignAttr`,
so that is why `AbstractAssumeAlignedAttrEmitter` is templated.
Reviewers: erichkeane, jdoerfert, hfinkel, aaron.ballman, rsmith
Reviewed By: erichkeane
Subscribers: cfe-commits
Tags: #clang
Differential Revision: https://reviews.llvm.org/D73005
2020-01-24 03:50:15 +08:00
|
|
|
// CHECK-NEXT: entry:
|
2022-02-15 22:27:12 +08:00
|
|
|
// CHECK-NEXT: [[CALL:%.*]] = call align 64 i32* @m1()
|
[Codegen] If reasonable, materialize clang's `AssumeAlignedAttr` as llvm's Alignment Attribute on call-site function return value
Summary:
This should be mostly NFC - we still lower the same alignment
knowledge to the IR. The main reasoning here is that
this somewhat improves readability of IR like this,
and will improve test coverage in upcoming patch.
Even though the alignment is guaranteed to always be an I-C-E,
we don't always materialize it as llvm's Alignment Attribute because:
1. There may be a non-zero offset
2. We may be sanitizing for alignment
Note that if there already was an IR alignment attribute
on return value, we union them, and thus the alignment
only ever rises.
Also, there is a second relevant clang attribute `AllocAlignAttr`,
so that is why `AbstractAssumeAlignedAttrEmitter` is templated.
Reviewers: erichkeane, jdoerfert, hfinkel, aaron.ballman, rsmith
Reviewed By: erichkeane
Subscribers: cfe-commits
Tags: #clang
Differential Revision: https://reviews.llvm.org/D73005
2020-01-24 03:50:15 +08:00
|
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[CALL]], align 4
|
|
|
|
// CHECK-NEXT: ret i32 [[TMP0]]
|
|
|
|
//
|
2022-02-15 22:27:12 +08:00
|
|
|
// assume_aligned(64) with no offset on m1's declaration: per the CHECK lines
// above, the alignment is materialized directly as an 'align 64' attribute on
// the call's return value — no llvm.assume call is emitted.
int test5(void) {
  return *m1();
}
|
|
|
|
|
2022-02-15 22:27:12 +08:00
|
|
|
int *m2(void) __attribute__((assume_aligned(64, 12)));
|
2014-09-26 13:04:30 +08:00
|
|
|
|
2022-01-16 17:53:11 +08:00
|
|
|
// CHECK-LABEL: @test6(
|
2020-06-19 21:45:27 +08:00
|
|
|
// CHECK-NEXT: entry:
|
2022-02-15 22:27:12 +08:00
|
|
|
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m2()
|
2020-09-12 19:36:45 +08:00
|
|
|
// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 64, i64 12) ]
|
2020-06-19 21:45:27 +08:00
|
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[CALL]], align 4
|
|
|
|
// CHECK-NEXT: ret i32 [[TMP0]]
|
|
|
|
//
|
2022-02-15 22:27:12 +08:00
|
|
|
// assume_aligned(64, 12) on m2's declaration: the nonzero offset prevents use
// of a plain 'align' return attribute (contrast test5); per the CHECK lines
// above, an llvm.assume with an "align"(ptr, i64 64, i64 12) bundle is emitted
// instead.
int test6(void) {
  return *m2();
}
|
|
|
|
|
2022-01-16 17:53:11 +08:00
|
|
|
// CHECK-LABEL: @pr43638(
|
2020-06-19 21:45:27 +08:00
|
|
|
// CHECK-NEXT: entry:
|
|
|
|
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
|
2022-01-16 17:53:11 +08:00
|
|
|
// CHECK-NEXT: store i32* [[A:%.*]], i32** [[A_ADDR]], align 8
|
2020-06-19 21:45:27 +08:00
|
|
|
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
|
|
|
|
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
|
2021-09-25 06:54:17 +08:00
|
|
|
// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 4294967296) ]
|
2020-06-19 21:45:27 +08:00
|
|
|
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
|
|
|
|
// CHECK-NEXT: store i32* [[TMP2]], i32** [[A_ADDR]], align 8
|
|
|
|
// CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
|
|
|
|
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP3]], i64 0
|
|
|
|
// CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
|
|
|
|
// CHECK-NEXT: ret i32 [[TMP4]]
|
|
|
|
//
|
2019-10-11 22:59:44 +08:00
|
|
|
// PR43638 regression test: an alignment of 4294967296 (2^32) does not fit in
// 32 bits; the CHECK lines above verify it survives intact as i64 4294967296
// in the "align" bundle rather than being truncated.
int pr43638(int *a) {
  a = __builtin_assume_aligned(a, 4294967296);
  return a[0];
}
|