Complete Rewrite of CGRecordLayoutBuilder
CGRecordLayoutBuilder was aging, complex, multi-pass, and showed signs of
existing before ASTRecordLayoutBuilder. It redundantly performed many
layout operations that are now performed by ASTRecordLayoutBuilder and
asserted that the results were the same. With the addition of support
for the MS-ABI, such as placement of vbptrs, vtordisps, different
bitfield layout and a variety of other features, CGRecordLayoutBuilder
was growing unwieldy in its redundancy.
This patch re-architects CGRecordLayoutBuilder to not perform any
redundant layout but rather, as directly as possible, lower an
ASTRecordLayout to an llvm::type. The new architecture is significantly
smaller and simpler than the CGRecordLayoutBuilder and contains fewer
ABI-specific code paths. It's also one pass.
The architecture of the new system is described in the comments. For the
most part, the new system simply takes all of the fields and bases from
an ASTRecordLayout, sorts them, inserts padding and dumps a record.
Bitfields, unions and primary virtual bases make this process a bit more
complicated. See the inline comments.
In addition, this patch updates a few lit tests due to the fact that the
new system computes more accurate llvm types than CGRecordLayoutBuilder.
Each change is commented individually in the review.
Differential Revision: http://llvm-reviews.chandlerc.com/D2795
llvm-svn: 201907
2014-02-22 07:49:50 +08:00
|
|
|
// RUN: %clang_cc1 -triple=%itanium_abi_triple -emit-llvm < %s | FileCheck %s -check-prefix CHECK -check-prefix CHECK-IT
|
|
|
|
// RUN: %clang_cc1 -triple=%ms_abi_triple -emit-llvm < %s | FileCheck %s -check-prefix CHECK -check-prefix CHECK-MS
|
2008-06-14 07:01:12 +08:00
|
|
|
|
|
|
|
/* File-scope objects exercised by the volatile load/store tests in main().
   Each non-volatile object is paired with a volatile counterpart (vS, vA,
   vF, ...) so the emitted IR can be checked for the presence or absence of
   the "volatile" qualifier on the corresponding loads and stores. */

int S;
volatile int vS;

int* pS;
volatile int* pvS;

int A[10];
volatile int vA[10];

/* Volatile member inside a non-volatile aggregate. */
struct { int x; } F;
struct { volatile int x; } vF;

/* Volatile applied to the whole aggregate (and via a pointer). */
struct { int x; } F2;
volatile struct { int x; } vF2;
volatile struct { int x; } *vpF2;

/* Nested aggregate access. */
struct { struct { int y; } x; } F3;
volatile struct { struct { int y; } x; } vF3;

/* Bitfields: Itanium and MS ABIs lay these out with different storage
   units, hence the CHECK-IT / CHECK-MS split in main(). */
struct { int x:3; } BF;
struct { volatile int x:3; } vBF;

/* GCC-style vector and Clang ext_vector_type. */
typedef int v4si __attribute__ ((vector_size (16)));
v4si V;
volatile v4si vV;

typedef __attribute__(( ext_vector_type(4) )) int extv4;
extv4 VE;
volatile extv4 vVE;

/* Function returning a volatile-qualified aggregate by value; the
   qualifier on an rvalue must not force volatile loads. */
volatile struct {int x;} aggFct(void);

/* Volatility carried through a typedef. */
typedef volatile int volatile_int;
volatile_int vtS;
|
|
|
|
|
2009-07-25 12:36:53 +08:00
|
|
|
int main() {
|
2008-06-14 07:01:12 +08:00
|
|
|
int i;
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: [[I:%[a-zA-Z0-9_.]+]] = alloca i32
|
2008-06-14 07:01:12 +08:00
|
|
|
// load
|
|
|
|
i=S;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* @S
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* [[I]]
|
2008-06-14 07:01:12 +08:00
|
|
|
i=vS;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load volatile i32, i32* @vS
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* [[I]]
|
2008-06-14 07:01:12 +08:00
|
|
|
i=*pS;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: [[PS_VAL:%[a-zA-Z0-9_.]+]] = load i32*, i32** @pS
|
|
|
|
// CHECK: load i32, i32* [[PS_VAL]]
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* [[I]]
|
2008-06-14 07:01:12 +08:00
|
|
|
i=*pvS;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: [[PVS_VAL:%[a-zA-Z0-9_.]+]] = load i32*, i32** @pvS
|
|
|
|
// CHECK: load volatile i32, i32* [[PVS_VAL]]
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* [[I]]
|
2008-06-14 07:01:12 +08:00
|
|
|
i=A[2];
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* getelementptr {{.*}} @A
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* [[I]]
|
2008-06-14 07:01:12 +08:00
|
|
|
i=vA[2];
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load volatile i32, i32* getelementptr {{.*}} @vA
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* [[I]]
|
2008-06-14 07:01:12 +08:00
|
|
|
i=F.x;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* getelementptr {{.*}} @F
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* [[I]]
|
2008-06-14 07:01:12 +08:00
|
|
|
i=vF.x;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load volatile i32, i32* getelementptr {{.*}} @vF
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* [[I]]
|
2008-06-14 07:01:12 +08:00
|
|
|
i=F2.x;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* getelementptr {{.*}} @F2
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* [[I]]
|
2008-06-14 07:01:12 +08:00
|
|
|
i=vF2.x;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load volatile i32, i32* getelementptr {{.*}} @vF2
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* [[I]]
|
2008-06-14 07:01:12 +08:00
|
|
|
i=vpF2->x;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: [[VPF2_VAL:%[a-zA-Z0-9_.]+]] = load {{%[a-zA-Z0-9_.]+}}*, {{%[a-zA-Z0-9_.]+}}** @vpF2
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: [[ELT:%[a-zA-Z0-9_.]+]] = getelementptr {{.*}} [[VPF2_VAL]]
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load volatile i32, i32* [[ELT]]
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* [[I]]
|
2008-06-14 07:01:12 +08:00
|
|
|
i=F3.x.y;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* getelementptr {{.*}} @F3
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* [[I]]
|
2008-06-14 07:01:12 +08:00
|
|
|
i=vF3.x.y;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load volatile i32, i32* getelementptr {{.*}} @vF3
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* [[I]]
|
2008-06-14 07:01:12 +08:00
|
|
|
i=BF.x;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK-IT: load i8, i8* getelementptr {{.*}} @BF
|
|
|
|
// CHECK-MS: load i32, i32* getelementptr {{.*}} @BF
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* [[I]]
|
2008-06-14 07:01:12 +08:00
|
|
|
i=vBF.x;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK-IT: load volatile i8, i8* getelementptr {{.*}} @vBF
|
|
|
|
// CHECK-MS: load volatile i32, i32* getelementptr {{.*}} @vBF
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* [[I]]
|
2008-06-14 07:01:12 +08:00
|
|
|
i=V[3];
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load <4 x i32>, <4 x i32>* @V
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* [[I]]
|
2008-06-14 07:01:12 +08:00
|
|
|
i=vV[3];
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load volatile <4 x i32>, <4 x i32>* @vV
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* [[I]]
|
2008-06-14 07:01:12 +08:00
|
|
|
i=VE.yx[1];
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load <4 x i32>, <4 x i32>* @VE
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* [[I]]
|
2008-06-14 07:01:12 +08:00
|
|
|
i=vVE.zy[1];
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load volatile <4 x i32>, <4 x i32>* @vVE
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* [[I]]
|
2010-07-13 16:18:22 +08:00
|
|
|
i = aggFct().x; // Note: not volatile
|
2013-02-08 16:00:13 +08:00
|
|
|
// N.b. Aggregate return is extremely target specific, all we can
|
|
|
|
// really say here is that there probably shouldn't be a volatile
|
|
|
|
// load.
|
|
|
|
// CHECK-NOT: load volatile
|
|
|
|
// CHECK: store i32 {{.*}}, i32* [[I]]
|
2009-11-04 07:32:42 +08:00
|
|
|
i=vtS;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load volatile i32, i32* @vtS
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* [[I]]
|
2008-06-14 07:01:12 +08:00
|
|
|
|
|
|
|
|
|
|
|
// store
|
|
|
|
S=i;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* [[I]]
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* @S
|
2008-06-14 07:01:12 +08:00
|
|
|
vS=i;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* [[I]]
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store volatile i32 {{.*}}, i32* @vS
|
2008-06-14 07:01:12 +08:00
|
|
|
*pS=i;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* [[I]]
|
|
|
|
// CHECK: [[PS_VAL:%[a-zA-Z0-9_.]+]] = load i32*, i32** @pS
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* [[PS_VAL]]
|
2008-06-14 07:01:12 +08:00
|
|
|
*pvS=i;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* [[I]]
|
|
|
|
// CHECK: [[PVS_VAL:%[a-zA-Z0-9_.]+]] = load i32*, i32** @pvS
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store volatile i32 {{.*}}, i32* [[PVS_VAL]]
|
2008-06-14 07:01:12 +08:00
|
|
|
A[2]=i;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* [[I]]
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* getelementptr {{.*}} @A
|
2008-06-14 07:01:12 +08:00
|
|
|
vA[2]=i;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* [[I]]
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store volatile i32 {{.*}}, i32* getelementptr {{.*}} @vA
|
2008-06-14 07:01:12 +08:00
|
|
|
F.x=i;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* [[I]]
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* getelementptr {{.*}} @F
|
2008-06-14 07:01:12 +08:00
|
|
|
vF.x=i;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* [[I]]
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store volatile i32 {{.*}}, i32* getelementptr {{.*}} @vF
|
2008-06-14 07:01:12 +08:00
|
|
|
F2.x=i;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* [[I]]
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* getelementptr {{.*}} @F2
|
2008-06-14 07:01:12 +08:00
|
|
|
vF2.x=i;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* [[I]]
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store volatile i32 {{.*}}, i32* getelementptr {{.*}} @vF2
|
2008-06-14 07:01:12 +08:00
|
|
|
vpF2->x=i;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* [[I]]
|
|
|
|
// CHECK: [[VPF2_VAL:%[a-zA-Z0-9_.]+]] = load {{%[a-zA-Z0-9._]+}}*, {{%[a-zA-Z0-9._]+}}** @vpF2
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: [[ELT:%[a-zA-Z0-9_.]+]] = getelementptr {{.*}} [[VPF2_VAL]]
|
|
|
|
// CHECK: store volatile i32 {{.*}}, i32* [[ELT]]
|
2008-06-14 07:01:12 +08:00
|
|
|
vF3.x.y=i;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* [[I]]
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store volatile i32 {{.*}}, i32* getelementptr {{.*}} @vF3
|
2008-06-14 07:01:12 +08:00
|
|
|
BF.x=i;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* [[I]]
|
|
|
|
// CHECK-IT: load i8, i8* getelementptr {{.*}} @BF
|
|
|
|
// CHECK-MS: load i32, i32* getelementptr {{.*}} @BF
|
Complete Rewrite of CGRecordLayoutBuilder
CGRecordLayoutBuilder was aging, complex, multi-pass, and shows signs of
existing before ASTRecordLayoutBuilder. It redundantly performed many
layout operations that are now performed by ASTRecordLayoutBuilder and
asserted that the results were the same. With the addition of support
for the MS-ABI, such as placement of vbptrs, vtordisps, different
bitfield layout and a variety of other features, CGRecordLayoutBuilder
was growing unwieldy in its redundancy.
This patch re-architects CGRecordLayoutBuilder to not perform any
redundant layout but rather, as directly as possible, lower an
ASTRecordLayout to an llvm::type. The new architecture is significantly
smaller and simpler than the CGRecordLayoutBuilder and contains fewer
ABI-specific code paths. It's also one pass.
The architecture of the new system is described in the comments. For the
most part, the new system simply takes all of the fields and bases from
an ASTRecordLayout, sorts them, inserts padding and dumps a record.
Bitfields, unions and primary virtual bases make this process a bit more
complicated. See the inline comments.
In addition, this patch updates a few lit tests due to the fact that the
new system computes more accurate llvm types than CGRecordLayoutBuilder.
Each change is commented individually in the review.
Differential Revision: http://llvm-reviews.chandlerc.com/D2795
llvm-svn: 201907
2014-02-22 07:49:50 +08:00
|
|
|
// CHECK-IT: store i8 {{.*}}, i8* getelementptr {{.*}} @BF
|
|
|
|
// CHECK-MS: store i32 {{.*}}, i32* getelementptr {{.*}} @BF
|
2008-11-19 17:36:46 +08:00
|
|
|
vBF.x=i;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* [[I]]
|
|
|
|
// CHECK-IT: load volatile i8, i8* getelementptr {{.*}} @vBF
|
|
|
|
// CHECK-MS: load volatile i32, i32* getelementptr {{.*}} @vBF
|
Complete Rewrite of CGRecordLayoutBuilder
CGRecordLayoutBuilder was aging, complex, multi-pass, and shows signs of
existing before ASTRecordLayoutBuilder. It redundantly performed many
layout operations that are now performed by ASTRecordLayoutBuilder and
asserted that the results were the same. With the addition of support
for the MS-ABI, such as placement of vbptrs, vtordisps, different
bitfield layout and a variety of other features, CGRecordLayoutBuilder
was growing unwieldy in its redundancy.
This patch re-architects CGRecordLayoutBuilder to not perform any
redundant layout but rather, as directly as possible, lower an
ASTRecordLayout to an llvm::type. The new architecture is significantly
smaller and simpler than the CGRecordLayoutBuilder and contains fewer
ABI-specific code paths. It's also one pass.
The architecture of the new system is described in the comments. For the
most part, the new system simply takes all of the fields and bases from
an ASTRecordLayout, sorts them, inserts padding and dumps a record.
Bitfields, unions and primary virtual bases make this process a bit more
complicated. See the inline comments.
In addition, this patch updates a few lit tests due to the fact that the
new system computes more accurate llvm types than CGRecordLayoutBuilder.
Each change is commented individually in the review.
Differential Revision: http://llvm-reviews.chandlerc.com/D2795
llvm-svn: 201907
2014-02-22 07:49:50 +08:00
|
|
|
// CHECK-IT: store volatile i8 {{.*}}, i8* getelementptr {{.*}} @vBF
|
|
|
|
// CHECK-MS: store volatile i32 {{.*}}, i32* getelementptr {{.*}} @vBF
|
2008-06-14 07:01:12 +08:00
|
|
|
V[3]=i;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* [[I]]
|
|
|
|
// CHECK: load <4 x i32>, <4 x i32>* @V
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store <4 x i32> {{.*}}, <4 x i32>* @V
|
2008-06-14 07:01:12 +08:00
|
|
|
vV[3]=i;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* [[I]]
|
|
|
|
// CHECK: load volatile <4 x i32>, <4 x i32>* @vV
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store volatile <4 x i32> {{.*}}, <4 x i32>* @vV
|
2009-11-04 07:32:42 +08:00
|
|
|
vtS=i;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* [[I]]
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store volatile i32 {{.*}}, i32* @vtS
|
2008-06-14 07:01:12 +08:00
|
|
|
|
|
|
|
// other ops:
|
|
|
|
++S;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* @S
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* @S
|
2008-06-14 07:01:12 +08:00
|
|
|
++vS;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load volatile i32, i32* @vS
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store volatile i32 {{.*}}, i32* @vS
|
2008-06-14 07:01:12 +08:00
|
|
|
i+=S;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load i32, i32* @S
|
|
|
|
// CHECK: load i32, i32* [[I]]
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* [[I]]
|
2008-06-14 07:01:12 +08:00
|
|
|
i+=vS;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load volatile i32, i32* @vS
|
|
|
|
// CHECK: load i32, i32* [[I]]
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store i32 {{.*}}, i32* [[I]]
|
2009-11-04 07:32:42 +08:00
|
|
|
++vtS;
|
2015-02-28 05:19:58 +08:00
|
|
|
// CHECK: load volatile i32, i32* @vtS
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: store volatile i32 {{.*}}, i32* @vtS
|
2009-05-27 06:03:21 +08:00
|
|
|
(void)vF2;
|
2013-02-07 23:39:16 +08:00
|
|
|
// From vF2 to a temporary
|
Change memcpy/memove/memset to have dest and source alignment attributes (Step 1).
Summary:
Upstream LLVM is changing the the prototypes of the @llvm.memcpy/memmove/memset
intrinsics. This change updates the Clang tests for this change.
The @llvm.memcpy/memmove/memset intrinsics currently have an explicit argument
which is required to be a constant integer. It represents the alignment of the
dest (and source), and so must be the minimum of the actual alignment of the
two.
This change removes the alignment argument in favour of placing the alignment
attribute on the source and destination pointers of the memory intrinsic call.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 100, i32 4, i1 false)
will now read
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 100, i1 false)
At this time the source and destination alignments must be the same (Step 1).
Step 2 of the change, to be landed shortly, will relax that contraint and allow
the source and destination to have different alignments.
llvm-svn: 322964
2018-01-20 01:12:54 +08:00
|
|
|
// CHECK: call void @llvm.memcpy.{{.*}}(i8* align {{[0-9]+}} %{{.*}}, i8* {{.*}} @vF2 {{.*}}, i1 true)
|
2009-05-27 06:03:21 +08:00
|
|
|
vF2 = vF2;
|
2013-02-07 23:39:16 +08:00
|
|
|
// vF2 to itself
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: call void @llvm.memcpy.{{.*}}(i8* {{.*@vF2.*}}, i8* {{.*@vF2.*}}, i1 true)
|
2009-05-27 06:03:21 +08:00
|
|
|
vF2 = vF2 = vF2;
|
2013-02-07 23:39:16 +08:00
|
|
|
// vF2 to itself twice
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: call void @llvm.memcpy.{{.*}}(i8* {{.*@vF2.*}}, i8* {{.*@vF2.*}}, i1 true)
|
|
|
|
// CHECK: call void @llvm.memcpy.{{.*}}(i8* {{.*@vF2.*}}, i8* {{.*@vF2.*}}, i1 true)
|
2010-08-26 07:42:51 +08:00
|
|
|
vF2 = (vF2, vF2);
|
2013-02-07 23:39:16 +08:00
|
|
|
// vF2 to a temporary, then vF2 to itself
|
Change memcpy/memove/memset to have dest and source alignment attributes (Step 1).
Summary:
Upstream LLVM is changing the the prototypes of the @llvm.memcpy/memmove/memset
intrinsics. This change updates the Clang tests for this change.
The @llvm.memcpy/memmove/memset intrinsics currently have an explicit argument
which is required to be a constant integer. It represents the alignment of the
dest (and source), and so must be the minimum of the actual alignment of the
two.
This change removes the alignment argument in favour of placing the alignment
attribute on the source and destination pointers of the memory intrinsic call.
For example, code which used to read:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 100, i32 4, i1 false)
will now read
call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 100, i1 false)
At this time the source and destination alignments must be the same (Step 1).
Step 2 of the change, to be landed shortly, will relax that contraint and allow
the source and destination to have different alignments.
llvm-svn: 322964
2018-01-20 01:12:54 +08:00
|
|
|
// CHECK: call void @llvm.memcpy.{{.*}}(i8* align {{[0-9]+}} %{{.*}}, i8* {{.*@vF2.*}}, i1 true)
|
2013-02-08 16:00:13 +08:00
|
|
|
// CHECK: call void @llvm.memcpy.{{.*}}(i8* {{.*@vF2.*}}, i8* {{.*@vF2.*}}, i1 true)
|
2008-06-14 07:01:12 +08:00
|
|
|
}
|