// RUN: %clang_cc1 -triple i386-unknown-unknown -emit-llvm %s -o - | FileCheck %s
struct I { int k[3]; };
struct M { struct I o[2]; };
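
// GNU range designators ([lo ... hi]) initialize a whole run of elements with
// one value; a plain initializer that follows continues after the last element
// of the range, as the constants checked below show.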
struct M v1[1] = { [0].o[0 ... 1].k[0 ... 1] = 4, 5 };
unsigned v2[2][3] = {[0 ... 1][0 ... 1] = 2222, 3333};
// CHECK-DAG: %struct.M = type { [2 x %struct.I] }
// CHECK-DAG: %struct.I = type { [3 x i32] }
// CHECK-DAG: [1 x %struct.M] [%struct.M { [2 x %struct.I] [%struct.I { [3 x i32] [i32 4, i32 4, i32 0] }, %struct.I { [3 x i32] [i32 4, i32 4, i32 5] }] }],
// CHECK-DAG: [2 x [3 x i32]] {{[[][[]}}3 x i32] [i32 2222, i32 2222, i32 0], [3 x i32] [i32 2222, i32 2222, i32 3333]],
// CHECK-DAG: [[INIT14:.*]] = private global [16 x i32] [i32 0, i32 0, i32 0, i32 0, i32 0, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 0, i32 0, i32 0, i32 0], align 4
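// [[INIT14]] is the private constant that test14, at the end of this file,
// expects to copy from with a single memcpy.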
void f1() {
// Scalars in braces.
int a = { 1 };
}
void f2() {
int a[2][2] = { { 1, 2 }, { 3, 4 } };
int b[3][3] = { { 1, 2 }, { 3, 4 } };
int *c[2] = { &a[1][1], &b[2][2] };
int *d[2][2] = { {&a[1][1], &b[2][2]}, {&a[0][0], &b[1][1]} };
int *e[3][3] = { {&a[1][1], &b[2][2]}, {&a[0][0], &b[1][1]} };
char ext[3][3] = {".Y",".U",".V"};
}
typedef void (* F)(void);
extern void foo(void);
struct S { F f; };
void f3() {
struct S a[1] = { { foo } };
}
// Constants
// CHECK-DAG: @g3 = constant i32 10
// CHECK-DAG: @f4.g4 = internal constant i32 12
const int g3 = 10;
int f4() {
static const int g4 = 12;
return g4;
}
// PR6537
typedef union vec3 {
struct { double x, y, z; };
double component[3];
} vec3;
vec3 f5(vec3 value) {
return (vec3) {{
.x = value.x
}};
}
// rdar://problem/8154689
void f6() {
int x;
long ids[] = { (long) &x };
}
// CHECK-DAG: @test7 = global{{.*}}{ i32 0, [4 x i8] c"bar\00" }
// PR8217
struct a7 {
int b;
char v[];
};
struct a7 test7 = { .b = 0, .v = "bar" };
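
// Very large, mostly-zero global initializers should not be emitted element by
// element: the CHECK lines below expect an explicit prefix holding the non-zero
// leading values followed by a zeroinitializer array for the tail.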
// CHECK-DAG: @huge_array = global {{.*}} <{ i32 1, i32 0, i32 2, i32 0, i32 3, [999999995 x i32] zeroinitializer }>
int huge_array[1000000000] = {1, 0, 2, 0, 3, 0, 0, 0};
// CHECK-DAG: @huge_struct = global {{.*}} { i32 1, <{ i32, [999999999 x i32] }> <{ i32 2, [999999999 x i32] zeroinitializer }> }
struct Huge {
int a;
int arr[1000 * 1000 * 1000];
} huge_struct = {1, {2, 0, 0, 0}};
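
// A designator past the explicitly written elements ([20] = 'q') still yields a
// single constant: a 21-byte explicit prefix followed by a zeroinitializer tail.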
// CHECK-DAG: @large_array_with_zeroes = constant <{ [21 x i8], [979 x i8] }> <{ [21 x i8] c"abc\01\02\03xyzzy\00\00\00\00\00\00\00\00\00q", [979 x i8] zeroinitializer }>
const char large_array_with_zeroes[1000] = {
'a', 'b', 'c', 1, 2, 3, 'x', 'y', 'z', 'z', 'y', [20] = 'q'
};
char global;
// CHECK-DAG: @large_array_with_zeroes_2 = global <{ [10 x i8*], [90 x i8*] }> <{ [10 x i8*] [i8* null, i8* null, i8* null, i8* null, i8* null, i8* null, i8* null, i8* null, i8* null, i8* @global], [90 x i8*] zeroinitializer }>
const void *large_array_with_zeroes_2[100] = {
[9] = &global
};
// CHECK-DAG: @large_array_with_zeroes_3 = global <{ [10 x i8*], [990 x i8*] }> <{ [10 x i8*] [i8* null, i8* null, i8* null, i8* null, i8* null, i8* null, i8* null, i8* null, i8* null, i8* @global], [990 x i8*] zeroinitializer }>
const void *large_array_with_zeroes_3[1000] = {
[9] = &global
};
// PR279 comment #3
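// A large local array with a small constant initializer should be lowered to a
// memset of the whole buffer plus individual stores of the few non-zero bytes,
// rather than a memcpy from a 100K global.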
char test8(int X) {
char str[100000] = "abc"; // tail should be memset.
return str[X];
// CHECK-LABEL: @test8(
// CHECK: call void @llvm.memset
// CHECK: store i8 97, i8* %{{[0-9]*}}, align 1
// CHECK: store i8 98, i8* %{{[0-9]*}}, align 1
// CHECK: store i8 99, i8* %{{[0-9]*}}, align 1
// CHECK-NOT: getelementptr
// CHECK: load
}
void bar(void*);
// PR279
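// When an initializer element is not a compile-time constant, the zero tail
// should still be emitted as a single memset; only the variable elements get
// individual stores (hence the CHECK-NOT on zero stores below).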
void test9(int X) {
int Arr[100] = { X }; // Should use memset
bar(Arr);
// CHECK-LABEL: @test9(
// CHECK: call void @llvm.memset
// CHECK-NOT: store i32 0
// CHECK: call void @bar
}
struct a {
int a, b, c, d, e, f, g, h, i, j, k, *p;
};
struct b {
struct a a,b,c,d,e,f,g;
};
void test10(int X) {
struct b S = { .a.a = X, .d.e = X, .f.e = 0, .f.f = 0, .f.p = 0 };
bar(&S);
// CHECK-LABEL: @test10(
// CHECK: call void @llvm.memset
// CHECK-NOT: store i32 0
// CHECK: call void @bar
}
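
// The nonzeroMemset* tests below expect an initializer made of one repeated
// byte pattern to be lowered to a single non-zero memset rather than a memcpy
// or a series of stores.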
void nonzeroMemseti8() {
char arr[33] = { 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, };
// CHECK-LABEL: @nonzeroMemseti8(
// CHECK-NOT: store
// CHECK-NOT: memcpy
// CHECK: call void @llvm.memset.p0i8.i32(i8* {{.*}}, i8 42, i32 33, i1 false)
}
void nonzeroMemseti16() {
unsigned short arr[17] = { 0x4242, 0x4242, 0x4242, 0x4242, 0x4242, 0x4242, 0x4242, 0x4242, 0x4242, 0x4242, 0x4242, 0x4242, 0x4242, 0x4242, 0x4242, 0x4242, 0x4242, };
// CHECK-LABEL: @nonzeroMemseti16(
// CHECK-NOT: store
// CHECK-NOT: memcpy
// CHECK: call void @llvm.memset.p0i8.i32(i8* {{.*}}, i8 66, i32 34, i1 false)
}
void nonzeroMemseti32() {
unsigned arr[9] = { 0xF0F0F0F0, 0xF0F0F0F0, 0xF0F0F0F0, 0xF0F0F0F0, 0xF0F0F0F0, 0xF0F0F0F0, 0xF0F0F0F0, 0xF0F0F0F0, 0xF0F0F0F0, };
// CHECK-LABEL: @nonzeroMemseti32(
// CHECK-NOT: store
// CHECK-NOT: memcpy
// CHECK: call void @llvm.memset.p0i8.i32(i8* {{.*}}, i8 -16, i32 36, i1 false)
}
void nonzeroMemseti64() {
unsigned long long arr[7] = { 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, 0xAAAAAAAAAAAAAAAA, };
// CHECK-LABEL: @nonzeroMemseti64(
// CHECK-NOT: store
// CHECK-NOT: memcpy
// CHECK: call void @llvm.memset.p0i8.i32(i8* {{.*}}, i8 -86, i32 56, i1 false)
}
void nonzeroMemsetf32() {
float arr[9] = { 0x1.cacacap+75, 0x1.cacacap+75, 0x1.cacacap+75, 0x1.cacacap+75, 0x1.cacacap+75, 0x1.cacacap+75, 0x1.cacacap+75, 0x1.cacacap+75, 0x1.cacacap+75, };
// CHECK-LABEL: @nonzeroMemsetf32(
// CHECK-NOT: store
// CHECK-NOT: memcpy
// CHECK: call void @llvm.memset.p0i8.i32(i8* {{.*}}, i8 101, i32 36, i1 false)
}
void nonzeroMemsetf64() {
double arr[7] = { 0x1.4444444444444p+69, 0x1.4444444444444p+69, 0x1.4444444444444p+69, 0x1.4444444444444p+69, 0x1.4444444444444p+69, 0x1.4444444444444p+69, 0x1.4444444444444p+69, };
// CHECK-LABEL: @nonzeroMemsetf64(
// CHECK-NOT: store
// CHECK-NOT: memcpy
// CHECK: call void @llvm.memset.p0i8.i32(i8* {{.*}}, i8 68, i32 56, i1 false)
}
void nonzeroPaddedUnionMemset() {
union U { char c; int i; };
union U arr[9] = { 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, };
// CHECK-LABEL: @nonzeroPaddedUnionMemset(
// CHECK-NOT: store
// CHECK-NOT: memcpy
// CHECK: call void @llvm.memset.p0i8.i32(i8* {{.*}}, i8 -16, i32 36, i1 false)
}
void nonzeroNestedMemset() {
union U { char c; int i; };
struct S { union U u; short i; };
struct S arr[5] = { { {0xF0}, 0xF0F0 }, { {0xF0}, 0xF0F0 }, { {0xF0}, 0xF0F0 }, { {0xF0}, 0xF0F0 }, { {0xF0}, 0xF0F0 }, };
// CHECK-LABEL: @nonzeroNestedMemset(
// CHECK-NOT: store
// CHECK-NOT: memcpy
// CHECK: call void @llvm.memset.p0i8.i32(i8* {{.*}}, i8 -16, i32 40, i1 false)
}
// PR9257
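// Assigning a compound literal that uses a [0 ... 3] range designator should
// store the value into each of the four covered elements.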
struct test11S {
int A[10];
};
void test11(struct test11S *P) {
*P = (struct test11S) { .A = { [0 ... 3] = 4 } };
// CHECK-LABEL: @test11(
// CHECK: store i32 4, i32* %{{.*}}, align 4
// CHECK: store i32 4, i32* %{{.*}}, align 4
// CHECK: store i32 4, i32* %{{.*}}, align 4
// CHECK: store i32 4, i32* %{{.*}}, align 4
// CHECK: ret void
}
// Verify that we can convert a recursive struct with a function-pointer member that returns
// an instance of the struct we're converting.
struct test12 {
struct test12 (*p)(void);
} test12g;
void test13(int x) {
struct X { int a; int b : 10; int c; };
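// Only c is mentioned, so the 10-bit bitfield b must still be zeroed; that is
// what the "and i16 ..., -1024" check below matches.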
struct X y = {.c = x};
// CHECK-LABEL: @test13(
// CHECK: and i16 {{.*}}, -1024
}
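
// PR20473: the two empty-string compound literals have different array types,
// so each call should memcpy from a constant of the matching size
// ([2 x i8] vs [3 x i8]).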
// CHECK-LABEL: @PR20473(
void PR20473() {
// CHECK: memcpy{{.*}}getelementptr inbounds ([2 x i8], [2 x i8]* @
bar((char[2]) {""});
// CHECK: memcpy{{.*}}getelementptr inbounds ([3 x i8], [3 x i8]* @
bar((char[3]) {""});
}
// Test that we initialize large member arrays by copying from a global and not
// with a series of stores.
struct S14 { int a[16]; };
void test14(struct S14 *s14) {
// CHECK-LABEL: @test14(
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 {{.*}}, i8* align 4 {{.*}} [[INIT14]] {{.*}}, i32 64, i1 false)
// CHECK-NOT: store
// CHECK: ret void
*s14 = (struct S14) { { [5 ... 11] = 17 } };
}