llvm-project/clang/test/CodeGen/x86_64-arguments.c

// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s
#include <stdarg.h>
// CHECK: define signext i8 @f0()
char f0(void) {
  return 0;
}
// CHECK: define signext i16 @f1()
short f1(void) {
  return 0;
}
// CHECK: define i32 @f2()
int f2(void) {
  return 0;
}
// CHECK: define float @f3()
float f3(void) {
  return 0;
}
// CHECK: define double @f4()
double f4(void) {
  return 0;
}
// CHECK: define x86_fp80 @f5()
long double f5(void) {
  return 0;
}
// CHECK: define void @f6(i8 signext %a0, i16 signext %a1, i32 %a2, i64 %a3, i8* %a4)
void f6(char a0, short a1, int a2, long long a3, void *a4) {
}
// CHECK: define void @f7(i32 %a0)
typedef enum { A, B, C } e7;
void f7(e7 a0) {
}
// Test merging/passing of upper eightbyte with X87 class.
//
// CHECK: define void @f8_1(%struct.s19* sret %agg.result)
// CHECK: define void @f8_2(%struct.s19* byval align 16 %a0)
union u8 {
  long double a;
  int b;
};
union u8 f8_1() { while (1) {} }
void f8_2(union u8 a0) {}
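// Note: the long double member makes the first eightbyte X87 (second X87UP);
// merging X87 with the INTEGER class of 'int b' yields MEMORY, which is why
// the union is returned via sret and passed byval above.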
// CHECK: define i64 @f9()
struct s9 { int a; int b; int : 0; } f9(void) { while (1) {} }
// CHECK: define void @f10(i64 %a0.coerce)
struct s10 { int a; int b; int : 0; };
void f10(struct s10 a0) {}
// CHECK: define void @f11(%struct.s19* sret %agg.result)
union { long double a; float b; } f11() { while (1) {} }
// CHECK: define i32 @f12_0()
// CHECK: define void @f12_1(i32 %a0.coerce)
struct s12 { int a __attribute__((aligned(16))); };
struct s12 f12_0(void) { while (1) {} }
void f12_1(struct s12 a0) {}
// Check that sret parameter is accounted for when checking available integer
// registers.
// CHECK: define void @f13(%struct.s13_0* sret %agg.result, i32 %a, i32 %b, i32 %c, i32 %d, {{.*}}* byval align 8 %e, i32 %f)
struct s13_0 { long long f0[3]; };
struct s13_1 { long long f0[2]; };
struct s13_0 f13(int a, int b, int c, int d,
                 struct s13_1 e, int f) { while (1) {} }
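// Note: the hidden sret pointer occupies the first integer register and a-d
// take the next four, so the two-eightbyte struct e no longer fits in the one
// remaining integer register and is passed byval; f then takes that register.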
// CHECK: define void @f14({{.*}}, i8 signext %X)
void f14(int a, int b, int c, int d, int e, int f, char X) {}
// CHECK: define void @f15({{.*}}, i8* %X)
void f15(int a, int b, int c, int d, int e, int f, void *X) {}
// CHECK: define void @f16({{.*}}, float %X)
void f16(float a, float b, float c, float d, float e, float f, float g, float h,
         float X) {}
// CHECK: define void @f17({{.*}}, x86_fp80 %X)
void f17(float a, float b, float c, float d, float e, float f, float g, float h,
         long double X) {}
// Check for valid coercion. The struct should be passed/returned as i32, not
// as i64 for better code quality.
// rdar://8135035
// CHECK: define void @f18(i32 %a, i32 %f18_arg1.coerce)
struct f18_s0 { int f0; };
void f18(int a, struct f18_s0 f18_arg1) { while (1) {} }
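// Note: the single int fills only four bytes of its eightbyte, so the coercion
// uses i32 instead of widening to i64.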
// Check byval alignment.
// CHECK: define void @f19(%struct.s19* byval align 16 %x)
struct s19 {
  long double a;
};
void f19(struct s19 x) {}
// CHECK: define void @f20(%struct.s20* byval align 32 %x)
struct __attribute__((aligned(32))) s20 {
  int x;
  int y;
};
void f20(struct s20 x) {}
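// Note: s19 is MEMORY because of the x87 long double, s20 because its 32-byte
// size exceeds two eightbytes; in both cases the byval alignment follows the
// type's alignment (16 and 32).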
struct StringRef {
  long x;
  const char *Ptr;
};
// rdar://7375902
// CHECK: define i8* @f21(i64 %S.coerce0, i8* %S.coerce1)
const char *f21(struct StringRef S) { return S.x+S.Ptr; }
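// Note: the second eightbyte is a pointer in the source type, so the coercion
// preserves it as i8* rather than flattening it to i64, keeping the pointer
// arithmetic above type-safe in the IR.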
// PR7567
typedef __attribute__ ((aligned(16))) struct f22s { unsigned long long x[2]; } L;
void f22(L x, L y) { }
// CHECK: @f22
// CHECK: %x = alloca{{.*}}, align 16
// CHECK: %y = alloca{{.*}}, align 16
// PR7714
struct f23S {
  short f0;
  unsigned f1;
  int f2;
};
void f23(int A, struct f23S B) {
// CHECK: define void @f23(i32 %A, i64 %B.coerce0, i32 %B.coerce1)
}
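// Note: f0 and f1 pack into the first eightbyte (i64); f2 occupies only four
// bytes of the second, so it is passed as i32.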
struct f24s { long a; int b; };
struct f23S f24(struct f23S *X, struct f24s *P2) {
  return *X;
// CHECK: define %struct.f24s @f24(%struct.f23S* %X, %struct.f24s* %P2)
}
// rdar://8248065
typedef float v4f32 __attribute__((__vector_size__(16)));
v4f32 f25(v4f32 X) {
// CHECK: define <4 x float> @f25(<4 x float> %X)
// CHECK-NOT: alloca
// CHECK: alloca <4 x float>
// CHECK-NOT: alloca
// CHECK: store <4 x float> %X, <4 x float>*
// CHECK-NOT: store
// CHECK: ret <4 x float>
  return X+X;
}
struct foo26 {
  int *X;
  float *Y;
};
struct foo26 f26(struct foo26 *P) {
// CHECK: define %struct.foo26 @f26(%struct.foo26* %P)
  return *P;
}
struct v4f32wrapper {
  v4f32 v;
};
struct v4f32wrapper f27(struct v4f32wrapper X) {
// CHECK: define <4 x float> @f27(<4 x float> %X.coerce)
  return X;
}
// rdar://5711709
struct f28c {
  double x;
  int y;
};
void f28(struct f28c C) {
// CHECK: define void @f28(double %C.coerce0, i32 %C.coerce1)
}
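// Note: the double eightbyte is SSE class (an XMM register) and the int
// eightbyte is INTEGER class, so the struct is expanded to (double, i32).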
struct f29a {
  struct c {
    double x;
    int y;
  } x[1];
};
void f29a(struct f29a A) {
// CHECK: define void @f29a(double %A.coerce0, i32 %A.coerce1)
}
// rdar://8249586
struct S0 { char f0[8]; char f2; char f3; char f4; };
void f30(struct S0 p_4) {
// CHECK: define void @f30(i64 %p_4.coerce0, i24 %p_4.coerce1)
}
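// Note: the char array fills the first eightbyte (i64); the second eightbyte
// holds only the three trailing chars, so it is coerced to i24 rather than
// being widened past the end of the struct.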
// Pass the third element as a float when followed by tail padding.
// rdar://8251384
struct f31foo { float a, b, c; };
float f31(struct f31foo X) {
// CHECK: define float @f31(<2 x float> %X.coerce0, float %X.coerce1)
  return X.c;
}
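// Note: a and b form one SSE eightbyte passed as <2 x float>; c sits alone in
// the second eightbyte and is passed as a plain float so nothing past the end
// of the struct is read.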
_Complex float f32(_Complex float A, _Complex float B) {
// rdar://6379669
// CHECK: define <2 x float> @f32(<2 x float> %A.coerce, <2 x float> %B.coerce)
  return A+B;
}
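// Note: _Complex float occupies a single SSE eightbyte and is passed and
// returned as <2 x float>, avoiding the old round trip through integer
// registers.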
// rdar://8357396
struct f33s { long x; float c,d; };
void f33(va_list X) {
  va_arg(X, struct f33s);
}
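// A minimal usage sketch (not part of the original test; 'vtake' and
// 'f33_use' are hypothetical names): passing a struct f33s through '...'
// means a callee like f33 has to reassemble it from both the gp and fp
// register save areas when it calls va_arg, since the struct spans an
// INTEGER eightbyte (x) and an SSE eightbyte (c, d).
void vtake(int n, ...);
void f33_use(void) {
  struct f33s v = { 1, 2.0f, 3.0f };
  vtake(1, v);
}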
typedef unsigned long long v1i64 __attribute__((__vector_size__(8)));
// rdar://8359248
// CHECK: define i64 @f34(i64 %arg.coerce)
v1i64 f34(v1i64 arg) { return arg; }
// rdar://8358475
// CHECK: define i64 @f35(i64 %arg.coerce)
typedef unsigned long v1i64_2 __attribute__((__vector_size__(8)));
v1i64_2 f35(v1i64_2 arg) { return arg+arg; }
// rdar://9122143
// CHECK: declare void @func(%struct._str* byval align 16)
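// Note: the anonymous union contains a long double, which forces MEMORY
// classification, so the argument is passed byval with the union's 16-byte
// alignment.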
typedef struct _str {
  union {
    long double a;
    long c;
  };
} str;
void func(str s);
str ss;
void f9122143()
{
  func(ss);
}