// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o %t %s
// RUN: FileCheck < %t %s

// CHECK: %0 = type { i64, double }
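// %0 is the anonymous pair type that f8_1/f8_2 below use to return and pass
// union u8 in registers.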
// CHECK: define signext i8 @f0()
char f0(void) {
  return 0;
}

// CHECK: define signext i16 @f1()
short f1(void) {
  return 0;
}

// CHECK: define i32 @f2()
int f2(void) {
  return 0;
}

// CHECK: define float @f3()
float f3(void) {
  return 0;
}

// CHECK: define double @f4()
double f4(void) {
  return 0;
}

// CHECK: define x86_fp80 @f5()
long double f5(void) {
  return 0;
}

// CHECK: define void @f6(i8 signext %a0, i16 signext %a1, i32 %a2, i64 %a3, i8* %a4)
void f6(char a0, short a1, int a2, long long a3, void *a4) {
}
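//
// char and short carry the signext attribute because the SysV x86-64
// convention has the caller extend sub-i32 integer arguments; int, long
// long, and pointers pass through in registers unchanged.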
// CHECK: define void @f7(i32 %a0)
typedef enum { A, B, C } e7;
void f7(e7 a0) {
}
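//
// An enum parameter is passed as its underlying integer type, so f7 lowers
// the same way as a plain int parameter.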
// Test merging/passing of upper eightbyte with X87 class.
//
// CHECK: define %0 @f8_1()
// CHECK: define void @f8_2(i64, double)
union u8 {
  long double a;
  int b;
};
union u8 f8_1() { while (1) {} }
void f8_2(union u8 a0) {}
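//
// union u8 is 16 bytes: the low eightbyte overlaps the int member with the
// start of the long double, while the high eightbyte holds only the long
// double's upper bits. The CHECK lines verify the union is split across an
// integer register (i64) and an SSE register (double) rather than forced to
// memory.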
// CHECK: define i64 @f9()
struct s9 { int a; int b; int : 0; } f9(void) { while (1) {} }
// CHECK: define void @f10(i64)
struct s10 { int a; int b; int : 0; };
void f10(struct s10 a0) {}
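//
// s9 and s10 are two ints plus a zero-width bit-field, 8 bytes in all, so
// each fits in one INTEGER eightbyte and travels as a single i64 in both
// return and argument position.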
// CHECK: define void @f11(%struct.s19* sret %agg.result)
union { long double a; float b; } f11() { while (1) {} }
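//
// A union overlapping a float with a long double is classified MEMORY, so
// f11's result comes back through a hidden sret pointer. (It is spelled
// %struct.s19* evidently because LLVM reuses the name of the structurally
// identical s19 defined near the end of this file.)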
// CHECK: define i64 @f12_0()
// CHECK: define void @f12_1(i64)
struct s12 { int a __attribute__((aligned(16))); };
struct s12 f12_0(void) { while (1) {} }
void f12_1(struct s12 a0) {}
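//
// aligned(16) pads s12 to 16 bytes, but the second eightbyte is entirely
// padding, so the struct still moves as a single i64.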
// Check that sret parameter is accounted for when checking available integer
// registers.
// CHECK: define void @f13(%struct.s13_0* sret %agg.result, i32 %a, i32 %b, i32 %c, i32 %d, %struct.s13_1* byval %e, i32 %f)
struct s13_0 { long long f0[3]; };
struct s13_1 { long long f0[2]; };
struct s13_0 f13(int a, int b, int c, int d,
                 struct s13_1 e, int f) { while (1) {} }
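//
// Accounting: the sret pointer consumes %rdi, and a/b/c/d consume %rsi,
// %rdx, %rcx, %r8. Passing e would need two GPRs with only %r9 free, so e
// goes byval on the stack while f still lands in %r9.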
// CHECK: define void @f14({{.*}}, i8 signext %X)
void f14(int a, int b, int c, int d, int e, int f, char X) {}
// CHECK: define void @f15({{.*}}, i8* %X)
void f15(int a, int b, int c, int d, int e, int f, void *X) {}
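//
// f14/f15: the six leading ints exhaust the integer argument registers, so X
// arrives on the stack; note the signext marking on the char is kept even
// for a stack argument.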
// CHECK: define void @f16({{.*}}, float %X)
void f16(float a, float b, float c, float d, float e, float f, float g, float h,
         float X) {}
// CHECK: define void @f17({{.*}}, x86_fp80 %X)
void f17(float a, float b, float c, float d, float e, float f, float g, float h,
         long double X) {}
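//
// f16/f17: eight floats exhaust the SSE argument registers, so X goes on the
// stack; long double (x86_fp80) is passed in memory no matter how many
// registers remain.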
// Check for valid coercion. The struct should be passed/returned as i32, not
// as i64 for better code quality.
// rdar://8135035
// CHECK: define void @f18(i32 %a, i32)
struct f18_s0 { int f0; };
void f18(int a, struct f18_s0 f18_arg1) { while (1) {} }
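//
// Coercing the 4-byte struct to i32 keeps the IR's loads and stores within
// the object's real size; an i64 coercion would touch 8 bytes. As a
// hypothetical contrast (illustrative only, not CHECKed): a struct that
// fills a whole eightbyte would be expected to coerce to i64.
struct f18_s1 { int f0; int f1; };
void f18_b(struct f18_s1 a0) { while (1) {} }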
// Check byval alignment.
// CHECK: define void @f19(%struct.s19* byval align 16 %x)
struct s19 {
  long double a;
};
void f19(struct s19 x) {}
// CHECK: define void @f20(%struct.s20* byval align 32 %x)
struct __attribute__((aligned(32))) s20 {
  int x;
  int y;
};
void f20(struct s20 x) {}
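//
// The byval alignment tracks the C type: 16 from long double's natural
// alignment in s19, 32 from the explicit aligned(32) on s20.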