// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-unknown-unknown -emit-llvm -debug-info-kind=limited -Wno-strict-prototypes -o - %s | \
// RUN: FileCheck %s -check-prefix=CHECK -check-prefix=SSE -check-prefix=NO-AVX512
// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-unknown-unknown -emit-llvm -debug-info-kind=limited -Wno-strict-prototypes -o - %s -target-feature +avx | \
// RUN: FileCheck %s -check-prefix=CHECK -check-prefix=AVX -check-prefix=NO-AVX512
// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-unknown-unknown -emit-llvm -debug-info-kind=limited -Wno-strict-prototypes -o - %s -target-feature +avx512f | \
// RUN: FileCheck %s -check-prefix=CHECK -check-prefix=AVX -check-prefix=AVX512

#include <stdarg.h>

// CHECK-LABEL: define{{.*}} signext i8 @f0()
char f0(void) {
  return 0;
}

// CHECK-LABEL: define{{.*}} signext i16 @f1()
short f1(void) {
  return 0;
}

// CHECK-LABEL: define{{.*}} i32 @f2()
int f2(void) {
  return 0;
}

// CHECK-LABEL: define{{.*}} float @f3()
float f3(void) {
  return 0;
}

// CHECK-LABEL: define{{.*}} double @f4()
double f4(void) {
  return 0;
}

// CHECK-LABEL: define{{.*}} x86_fp80 @f5()
long double f5(void) {
  return 0;
}

// CHECK-LABEL: define{{.*}} void @f6(i8 noundef signext %a0, i16 noundef signext %a1, i32 noundef %a2, i64 noundef %a3, i8* noundef %a4)
void f6(char a0, short a1, int a2, long long a3, void *a4) {
}

// CHECK-LABEL: define{{.*}} void @f7(i32 noundef %a0)
typedef enum { A, B, C } e7;
void f7(e7 a0) {
}

// Test merging/passing of upper eightbyte with X87 class.
//
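// Note (explanatory summary, not the normative psABI text): when an eightbyte
// with X87/X87UP class has to be merged with other data, the SysV x86-64 rules
// classify the whole aggregate as MEMORY, so the union below is returned
// through an sret pointer and passed byval, as the CHECK lines expect.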
// CHECK-LABEL: define{{.*}} void @f8_1(%union.u8* noalias sret(%union.u8) align 16 %agg.result)
// CHECK-LABEL: define{{.*}} void @f8_2(%union.u8* noundef byval(%union.u8) align 16 %a0)
union u8 {
  long double a;
  int b;
};
union u8 f8_1(void) { while (1) {} }
void f8_2(union u8 a0) {}

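// The trailing zero-width bit-field in s9/s10 below does not add an eightbyte;
// the two ints occupy a single INTEGER eightbyte, so the struct is coerced to
// i64 for both the return value and the argument, as checked below.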
// CHECK-LABEL: define{{.*}} i64 @f9()
struct s9 { int a; int b; int : 0; } f9(void) { while (1) {} }

// CHECK-LABEL: define{{.*}} void @f10(i64 %a0.coerce)
struct s10 { int a; int b; int : 0; };
void f10(struct s10 a0) {}

// CHECK-LABEL: define{{.*}} void @f11(%union.anon* noalias sret(%union.anon) align 16 %agg.result)
union { long double a; float b; } f11(void) { while (1) {} }

// CHECK-LABEL: define{{.*}} i32 @f12_0()
// CHECK-LABEL: define{{.*}} void @f12_1(i32 %a0.coerce)
struct s12 { int a __attribute__((aligned(16))); };
struct s12 f12_0(void) { while (1) {} }
void f12_1(struct s12 a0) {}

// Check that sret parameter is accounted for when checking available integer
// registers.
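// (The hidden sret pointer occupies %rdi, so a-d and f use the remaining five
// integer registers; the two-eightbyte struct e no longer fits and is passed
// byval on the stack, which is what the CHECK line below verifies.)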
// CHECK: define{{.*}} void @f13(%struct.s13_0* noalias sret(%struct.s13_0) align 8 %agg.result, i32 noundef %a, i32 noundef %b, i32 noundef %c, i32 noundef %d, {{.*}}* noundef byval({{.*}}) align 8 %e, i32 noundef %f)
struct s13_0 { long long f0[3]; };
struct s13_1 { long long f0[2]; };
struct s13_0 f13(int a, int b, int c, int d,
                 struct s13_1 e, int f) { while (1) {} }

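// f14-f17 pass one more argument than there are free integer or SSE registers;
// the extra argument goes on the stack but keeps its natural IR type, as the
// CHECK lines below show.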
// CHECK: define{{.*}} void @f14({{.*}}, i8 noundef signext %X)
void f14(int a, int b, int c, int d, int e, int f, char X) {}

// CHECK: define{{.*}} void @f15({{.*}}, i8* noundef %X)
void f15(int a, int b, int c, int d, int e, int f, void *X) {}

// CHECK: define{{.*}} void @f16({{.*}}, float noundef %X)
void f16(float a, float b, float c, float d, float e, float f, float g, float h,
         float X) {}

// CHECK: define{{.*}} void @f17({{.*}}, x86_fp80 noundef %X)
void f17(float a, float b, float c, float d, float e, float f, float g, float h,
         long double X) {}

// Check for valid coercion. The struct should be passed/returned as i32, not
// as i64 for better code quality.
// rdar://8135035
// CHECK-LABEL: define{{.*}} void @f18(i32 noundef %a, i32 %f18_arg1.coerce)
struct f18_s0 { int f0; };
void f18(int a, struct f18_s0 f18_arg1) { while (1) {} }

// Check byval alignment.
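// (s19 is naturally 16-byte aligned because of the long double; s20 holds only
// 8 bytes of data but carries aligned(32), so its byval slot is raised to
// align 32 in the CHECK lines below.)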

// CHECK-LABEL: define{{.*}} void @f19(%struct.s19* noundef byval(%struct.s19) align 16 %x)
struct s19 {
  long double a;
};
void f19(struct s19 x) {}

// CHECK-LABEL: define{{.*}} void @f20(%struct.s20* noundef byval(%struct.s20) align 32 %x)
struct __attribute__((aligned(32))) s20 {
  int x;
  int y;
};
void f20(struct s20 x) {}

struct StringRef {
  long x;
  const char *Ptr;
};

// rdar://7375902
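// (When an INTEGER-class eightbyte is exactly a pointer, clang coerces it to
// that pointer type rather than i64, so StringRef is passed as (i64, i8*)
// below instead of (i64, i64).)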
// CHECK-LABEL: define{{.*}} i8* @f21(i64 %S.coerce0, i8* %S.coerce1)
const char *f21(struct StringRef S) { return S.x+S.Ptr; }

// PR7567
typedef __attribute__ ((aligned(16))) struct f22s { unsigned long long x[2]; } L;
void f22(L x, L y) { }
// CHECK: @f22
// CHECK: %x = alloca{{.*}}, align 16
// CHECK: %y = alloca{{.*}}, align 16


// PR7714
struct f23S {
  short f0;
  unsigned f1;
  int f2;
};

void f23(int A, struct f23S B) {
// CHECK-LABEL: define{{.*}} void @f23(i32 noundef %A, i64 %B.coerce0, i32 %B.coerce1)
}

struct f24s { long a; int b; };

struct f23S f24(struct f23S *X, struct f24s *P2) {
  return *X;
// CHECK: define{{.*}} { i64, i32 } @f24(%struct.f23S* noundef %X, %struct.f24s* noundef %P2)
}

// rdar://8248065
typedef float v4f32 __attribute__((__vector_size__(16)));
v4f32 f25(v4f32 X) {
// CHECK-LABEL: define{{.*}} <4 x float> @f25(<4 x float> noundef %X)
// CHECK-NOT: alloca
// CHECK: alloca <4 x float>
// CHECK-NOT: alloca
// CHECK: store <4 x float> %X, <4 x float>*
// CHECK-NOT: store
// CHECK: ret <4 x float>
  return X+X;
}

struct foo26 {
  int *X;
  float *Y;
};

struct foo26 f26(struct foo26 *P) {
// CHECK: define{{.*}} { i32*, float* } @f26(%struct.foo26* noundef %P)
  return *P;
}

struct v4f32wrapper {
  v4f32 v;
};

struct v4f32wrapper f27(struct v4f32wrapper X) {
// CHECK-LABEL: define{{.*}} <4 x float> @f27(<4 x float> %X.coerce)
  return X;
}

// PR22563 - We should unwrap simple structs and arrays to pass
// and return them in the appropriate vector registers if possible.

typedef float v8f32 __attribute__((__vector_size__(32)));
struct v8f32wrapper {
  v8f32 v;
};

struct v8f32wrapper f27a(struct v8f32wrapper X) {
// AVX-LABEL: define{{.*}} <8 x float> @f27a(<8 x float> %X.coerce)
  return X;
}

struct v8f32wrapper_wrapper {
  v8f32 v[1];
};

struct v8f32wrapper_wrapper f27b(struct v8f32wrapper_wrapper X) {
// AVX-LABEL: define{{.*}} <8 x float> @f27b(<8 x float> %X.coerce)
  return X;
}

// rdar://5711709
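// (The first eightbyte of f28c is SSE (the double) and the second is INTEGER,
// but only four bytes of the second eightbyte are real data, so it is passed
// as i32 rather than i64, as the CHECK line below expects.)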
struct f28c {
  double x;
  int y;
};
void f28(struct f28c C) {
// CHECK-LABEL: define{{.*}} void @f28(double %C.coerce0, i32 %C.coerce1)
}

struct f29a {
  struct c {
    double x;
    int y;
  } x[1];
};

void f29a(struct f29a A) {
// CHECK-LABEL: define{{.*}} void @f29a(double %A.coerce0, i32 %A.coerce1)
}

// rdar://8249586
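// (S0 is 11 bytes: the first eightbyte is passed as i64 and the three trailing
// chars are passed as i24 so nothing is read past the end of the struct; see
// the CHECK line below.)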
struct S0 { char f0[8]; char f2; char f3; char f4; };
void f30(struct S0 p_4) {
// CHECK-LABEL: define{{.*}} void @f30(i64 %p_4.coerce0, i24 %p_4.coerce1)
}

// Pass the third element as a float when followed by tail padding.
// rdar://8251384
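// (a and b pack into one <2 x float> eightbyte; c sits alone in the second
// eightbyte, so it is passed as float rather than widened to double, which
// would read past the end of the struct.)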
struct f31foo { float a, b, c; };
float f31(struct f31foo X) {
// CHECK-LABEL: define{{.*}} float @f31(<2 x float> %X.coerce0, float %X.coerce1)
  return X.c;
}

_Complex float f32(_Complex float A, _Complex float B) {
  // rdar://6379669
  // CHECK-LABEL: define{{.*}} <2 x float> @f32(<2 x float> noundef %A.coerce, <2 x float> noundef %B.coerce)
  return A+B;
}

// rdar://8357396
struct f33s { long x; float c,d; };

void f33(va_list X) {
  va_arg(X, struct f33s);
}

typedef unsigned long long v1i64 __attribute__((__vector_size__(8)));

// rdar://8359248
// CHECK-LABEL: define{{.*}} double @f34(double noundef %arg.coerce)
v1i64 f34(v1i64 arg) { return arg; }

// rdar://8358475
// CHECK-LABEL: define{{.*}} double @f35(double noundef %arg.coerce)
typedef unsigned long v1i64_2 __attribute__((__vector_size__(8)));
v1i64_2 f35(v1i64_2 arg) { return arg+arg; }

// rdar://9122143
// CHECK: declare void @func(%struct._str* noundef byval(%struct._str) align 16)
typedef struct _str {
  union {
    long double a;
    long c;
  };
} str;

void func(str s);
str ss;
void f9122143(void)
{
  func(ss);
}

// CHECK-LABEL: define{{.*}} double @f36(double noundef %arg.coerce)
typedef unsigned v2i32 __attribute((__vector_size__(8)));
v2i32 f36(v2i32 arg) { return arg; }

// AVX: declare void @f38(<8 x float>)
// AVX: declare void @f37(<8 x float> noundef)
// SSE: declare void @f38(%struct.s256* noundef byval(%struct.s256) align 32)
// SSE: declare void @f37(<8 x float>* noundef byval(<8 x float>) align 32)
typedef float __m256 __attribute__ ((__vector_size__ (32)));
typedef struct {
  __m256 m;
} s256;

s256 x38;
__m256 x37;

void f38(s256 x);
void f37(__m256 x);
void f39(void) { f38(x38); f37(x37); }

// The next two tests make sure that the struct below is passed
// in the same way regardless of whether AVX is used.
// CHECK: declare void @func40(%struct.t128* noundef byval(%struct.t128) align 16)
typedef float __m128 __attribute__ ((__vector_size__ (16)));
typedef struct t128 {
  __m128 m;
  __m128 n;
} two128;

extern void func40(two128 s);
void func41(two128 s) {
  func40(s);
}

// CHECK: declare void @func42(%struct.t128_2* noundef byval(%struct.t128_2) align 16)
typedef struct xxx {
  __m128 array[2];
} Atwo128;
typedef struct t128_2 {
  Atwo128 x;
} SA;

extern void func42(SA s);
void func43(SA s) {
  func42(s);
}

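// (s44 below is overaligned (32 bytes) and too big for registers, so va_arg
// fetches it from the overflow area; the ptrtoint/add 31/and -32/inttoptr
// sequence checked below rounds the overflow-area pointer up to that 32-byte
// alignment before the load.)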
// CHECK-LABEL: define{{.*}} i32 @f44
// CHECK: ptrtoint
// CHECK-NEXT: add i64 %{{[0-9]+}}, 31
// CHECK-NEXT: and i64 %{{[0-9]+}}, -32
// CHECK-NEXT: inttoptr
typedef int T44 __attribute((vector_size(32)));
struct s44 { T44 x; int y; };
int f44(int i, ...) {
  __builtin_va_list ap;
  __builtin_va_start(ap, i);
  struct s44 s = __builtin_va_arg(ap, struct s44);
  __builtin_va_end(ap);
  return s.y;
}

// Test that vec3 returns the correct LLVM IR type.
// AVX-LABEL: define{{.*}} i32 @foo(<3 x i64> noundef %X)
typedef long long3 __attribute((ext_vector_type(3)));
int foo(long3 X)
{
  return 0;
}

// Make sure we don't use a varargs convention for a function without a
// prototype where AVX types are involved.
// AVX: @test45
// AVX: call i32 bitcast (i32 (...)* @f45 to i32 (<8 x float>)*)
int f45();
__m256 x45;
void test45(void) { f45(x45); }

// Make sure we use byval to pass 64-bit vectors in memory; the LLVM call
// lowering can't handle this case correctly because it runs after legalization.
// CHECK: @test46
// CHECK: call void @f46({{.*}}<2 x float>* noundef byval(<2 x float>) align 8 {{.*}}, <2 x float>* noundef byval(<2 x float>) align 8 {{.*}})
typedef float v46 __attribute((vector_size(8)));
void f46(v46,v46,v46,v46,v46,v46,v46,v46,v46,v46);
void test46(void) { v46 x = {1,2}; f46(x,x,x,x,x,x,x,x,x,x); }

// Check that we pass the struct below without using byval, which helps out
// codegen.
//
// CHECK: @test47
// CHECK: call void @f47(i32 {{.*}}, i32 {{.*}}, i32 {{.*}}, i32 {{.*}}, i32 {{.*}}, i32 {{.*}}, i32 {{.*}})
struct s47 { unsigned a; };
void f47(int,int,int,int,int,int,struct s47);
void test47(int a, struct s47 b) { f47(a, a, a, a, a, a, b); }

// rdar://12723368
// In the following example, there are holes in T4 at the 3rd byte and the 4th
// byte; however, T2 does not have those holes. T4 is chosen as the
// representing type for union T1, but we can't use load or store of T4 since
// that would skip the 3rd and 4th bytes.
// In general, since we don't accurately represent the data fields of a union,
// do not use load or store of the representing LLVM type for the union.
typedef _Complex int T2;
typedef _Complex char T5;
typedef _Complex int T7;
typedef struct T4 { T5 field0; T7 field1; } T4;
typedef union T1 { T2 field0; T4 field1; } T1;
extern T1 T1_retval;
T1 test48(void) {
// CHECK: @test48
// CHECK: memcpy
// CHECK: memcpy
  return T1_retval;
}

void test49_helper(double, ...);
void test49(double d, double e) {
  test49_helper(d, e);
}
// CHECK-LABEL: define{{.*}} void @test49(
// CHECK: [[T0:%.*]] = load double, double*
// CHECK-NEXT: [[T1:%.*]] = load double, double*
// CHECK-NEXT: call void (double, ...) @test49_helper(double noundef [[T0]], double noundef [[T1]])

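// (test50_helper has no prototype; on x86-64, unprototyped functions are
// compatible with variadic ones, so the call below is emitted through a
// bitcast to a prototyped variadic type, as the CHECK lines expect.)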
void test50_helper();
void test50(double d, double e) {
  test50_helper(d, e);
}
// CHECK-LABEL: define{{.*}} void @test50(
// CHECK: [[T0:%.*]] = load double, double*
// CHECK-NEXT: [[T1:%.*]] = load double, double*
// CHECK-NEXT: call void (double, double, ...) bitcast (void (...)* @test50_helper to void (double, double, ...)*)(double noundef [[T0]], double noundef [[T1]])

struct test51_s { __uint128_t intval; };
void test51(struct test51_s *s, __builtin_va_list argList) {
  *s = __builtin_va_arg(argList, struct test51_s);
}

// CHECK-LABEL: define{{.*}} void @test51
// CHECK: [[TMP_ADDR:%.*]] = alloca [[STRUCT_TEST51:%.*]], align 16
// CHECK: br i1
// CHECK: [[REG_SAVE_AREA_PTR:%.*]] = getelementptr inbounds {{.*}}, i32 0, i32 3
// CHECK-NEXT: [[REG_SAVE_AREA:%.*]] = load i8*, i8** [[REG_SAVE_AREA_PTR]]
// CHECK-NEXT: [[VALUE_ADDR:%.*]] = getelementptr i8, i8* [[REG_SAVE_AREA]], i32 {{.*}}
// CHECK-NEXT: [[CASTED_VALUE_ADDR:%.*]] = bitcast i8* [[VALUE_ADDR]] to [[STRUCT_TEST51]]
// CHECK-NEXT: [[CASTED_TMP_ADDR:%.*]] = bitcast [[STRUCT_TEST51]]* [[TMP_ADDR]] to i8*
// CHECK-NEXT: [[RECASTED_VALUE_ADDR:%.*]] = bitcast [[STRUCT_TEST51]]* [[CASTED_VALUE_ADDR]] to i8*
// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[CASTED_TMP_ADDR]], i8* align 8 [[RECASTED_VALUE_ADDR]], i64 16, i1 false)
// CHECK-NEXT: add i32 {{.*}}, 16
// CHECK-NEXT: store i32 {{.*}}, i32* {{.*}}
// CHECK-NEXT: br label

void test52_helper(int, ...);
__m256 x52;
void test52(void) {
  test52_helper(0, x52, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0i);
}
// AVX: @test52_helper(i32 noundef 0, <8 x float> noundef {{%[a-zA-Z0-9]+}}, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef {{%[a-zA-Z0-9]+}}, double noundef {{%[a-zA-Z0-9]+}})

void test53(__m256 *m, __builtin_va_list argList) {
  *m = __builtin_va_arg(argList, __m256);
}
// AVX-LABEL: define{{.*}} void @test53
// AVX-NOT: br i1
// AVX: ret void

void test54_helper(__m256, ...);
__m256 x54;
void test54(void) {
  test54_helper(x54, x54, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0i);
  test54_helper(x54, x54, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0i);
}
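// (The two calls differ only by one extra double; as the CHECK lines below
// show, with that extra double the trailing complex value no longer fits in
// the remaining SSE registers and is passed byval as { double, double },
// whereas in the first call it is still split into two double arguments.)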
// AVX: @test54_helper(<8 x float> noundef {{%[a-zA-Z0-9]+}}, <8 x float> noundef {{%[a-zA-Z0-9]+}}, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef {{%[a-zA-Z0-9]+}}, double noundef {{%[a-zA-Z0-9]+}})
// AVX: @test54_helper(<8 x float> noundef {{%[a-zA-Z0-9]+}}, <8 x float> noundef {{%[a-zA-Z0-9]+}}, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef 1.000000e+00, { double, double }* noundef byval({ double, double }) align 8 {{%[^)]+}})

typedef float __m512 __attribute__ ((__vector_size__ (64)));
typedef struct {
  __m512 m;
} s512;

s512 x55;
__m512 x56;

// On AVX512, aggregates which contain a __m512 type are classified as SSE/SSEUP
// as per https://github.com/hjl-tools/x86-psABI/commit/30f9c9 3.2.3p2 Rule 1
//
// AVX512: declare void @f55(<16 x float>)
// NO-AVX512: declare void @f55(%struct.s512* noundef byval(%struct.s512) align 64)
void f55(s512 x);

// __m512 has type SSE/SSEUP on AVX512.
//
// AVX512: declare void @f56(<16 x float> noundef)
// NO-AVX512: declare void @f56(<16 x float>* noundef byval(<16 x float>) align 64)
void f56(__m512 x);
void f57(void) { f55(x55); f56(x56); }

// Like for __m128 on AVX, check that the struct below is passed
// in the same way regardless of AVX512 being used.
//
// CHECK: declare void @f58(%struct.t256* noundef byval(%struct.t256) align 32)
typedef struct t256 {
  __m256 m;
  __m256 n;
} two256;

extern void f58(two256 s);
void f59(two256 s) {
  f58(s);
}

// CHECK: declare void @f60(%struct.sat256* noundef byval(%struct.sat256) align 32)
typedef struct at256 {
  __m256 array[2];
} Atwo256;
typedef struct sat256 {
  Atwo256 x;
} SAtwo256;

extern void f60(SAtwo256 s);
void f61(SAtwo256 s) {
  f60(s);
}

// AVX512: @f62_helper(i32 noundef 0, <16 x float> noundef {{%[a-zA-Z0-9]+}}, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef {{%[a-zA-Z0-9]+}}, double noundef {{%[a-zA-Z0-9]+}})
void f62_helper(int, ...);
__m512 x62;
void f62(void) {
  f62_helper(0, x62, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0i);
}

// Like for __m256 on AVX, we always pass __m512 in memory, and don't
// need to use the register save area.
//
// AVX512-LABEL: define{{.*}} void @f63
// AVX512-NOT: br i1
// AVX512: ret void
void f63(__m512 *m, __builtin_va_list argList) {
  *m = __builtin_va_arg(argList, __m512);
}

// AVX512: @f64_helper(<16 x float> noundef {{%[a-zA-Z0-9]+}}, <16 x float> noundef {{%[a-zA-Z0-9]+}}, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef {{%[a-zA-Z0-9]+}}, double noundef {{%[a-zA-Z0-9]+}})
// AVX512: @f64_helper(<16 x float> noundef {{%[a-zA-Z0-9]+}}, <16 x float> noundef {{%[a-zA-Z0-9]+}}, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef 1.000000e+00, double noundef 1.000000e+00, { double, double }* noundef byval({ double, double }) align 8 {{%[^)]+}})
void f64_helper(__m512, ...);
__m512 x64;
void f64(void) {
  f64_helper(x64, x64, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0i);
  f64_helper(x64, x64, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0i);
}

struct t65 {
  __m256 m;
  int : 0;
};
// SSE-LABEL: @f65(%struct.t65* noundef byval(%struct.t65) align 32 %{{[^,)]+}})
// AVX: @f65(<8 x float> %{{[^,)]+}})
void f65(struct t65 a0) {
}

typedef float t66 __attribute__((__vector_size__(128), __aligned__(128)));

// AVX512: @f66(<32 x float>* noundef byval(<32 x float>) align 128 %0)
void f66(t66 a0) {
}

/// The synthesized __va_list_tag does not have file/line fields.
// CHECK: = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "__va_list_tag",
// CHECK-NOT: file:
// CHECK-NOT: line:
// CHECK-SAME: size: