llvm-project/clang/test/CodeGenObjC/blocks.m

// RUN: %clang_cc1 -triple i386-apple-darwin9 -fobjc-runtime=macosx-fragile-10.5 -emit-llvm -fblocks -o - %s | FileCheck %s
// test1. All of this is somehow testing rdar://6676764
struct S {
  void (^F)(struct S*);
} P;
@interface T
- (int)foo: (T* (^)(T*)) x;
@end
void foo(T *P) {
  [P foo: 0];
}
@interface A
-(void) im0;
@end
// CHECK: define internal i32 @"__8-[A im0]_block_invoke"(
@implementation A
-(void) im0 {
  (void) ^{ return 1; }();
}
@end
@interface B : A @end
@implementation B
-(void) im1 {
  ^(void) { [self im0]; }();
}
-(void) im2 {
  ^{ [super im0]; }();
}
-(void) im3 {
  ^{ ^{[super im0];}(); }();
}
@end
// rdar://problem/9006315
// In-depth test for the initialization of a __weak __block variable.
@interface Test2 -(void) destroy; @end
void test2(Test2 *x) {
  extern void test2_helper(void (^)(void));
// CHECK-LABEL: define void @test2(
// CHECK: [[X:%.*]] = alloca [[TEST2:%.*]]*,
// CHECK-NEXT: [[WEAKX:%.*]] = alloca [[WEAK_T:%.*]],
// CHECK-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]],
// CHECK-NEXT: store [[TEST2]]*
// isa=1 for weak byrefs.
// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[WEAK_T]], [[WEAK_T]]* [[WEAKX]], i32 0, i32 0
// CHECK-NEXT: store i8* inttoptr (i32 1 to i8*), i8** [[T0]]
// Forwarding.
// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[WEAK_T]], [[WEAK_T]]* [[WEAKX]], i32 0, i32 1
// CHECK-NEXT: store [[WEAK_T]]* [[WEAKX]], [[WEAK_T]]** [[T1]]
// Flags. This is just BLOCK_HAS_COPY_DISPOSE | BLOCK_BYREF_LAYOUT_UNRETAINED.
// CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds [[WEAK_T]], [[WEAK_T]]* [[WEAKX]], i32 0, i32 2
// CHECK-NEXT: store i32 1375731712, i32* [[T2]]
// Size.
// CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds [[WEAK_T]], [[WEAK_T]]* [[WEAKX]], i32 0, i32 3
// CHECK-NEXT: store i32 28, i32* [[T3]]
// Copy and dispose helpers.
// CHECK-NEXT: [[T4:%.*]] = getelementptr inbounds [[WEAK_T]], [[WEAK_T]]* [[WEAKX]], i32 0, i32 4
// CHECK-NEXT: store i8* bitcast (void (i8*, i8*)* @__Block_byref_object_copy_{{.*}} to i8*), i8** [[T4]]
// CHECK-NEXT: [[T5:%.*]] = getelementptr inbounds [[WEAK_T]], [[WEAK_T]]* [[WEAKX]], i32 0, i32 5
// CHECK-NEXT: store i8* bitcast (void (i8*)* @__Block_byref_object_dispose_{{.*}} to i8*), i8** [[T5]]
// Actually capture the value.
// CHECK-NEXT: [[T6:%.*]] = getelementptr inbounds [[WEAK_T]], [[WEAK_T]]* [[WEAKX]], i32 0, i32 6
// CHECK-NEXT: [[CAPTURE:%.*]] = load [[TEST2]]*, [[TEST2]]** [[X]]
// CHECK-NEXT: store [[TEST2]]* [[CAPTURE]], [[TEST2]]** [[T6]]
// Then we initialize the block, blah blah blah.
// CHECK: call void @test2_helper(
// Finally, kill the variable with BLOCK_FIELD_IS_BYREF. We're not
// supposed to pass BLOCK_FIELD_IS_WEAK here.
// CHECK: [[T0:%.*]] = bitcast [[WEAK_T]]* [[WEAKX]] to i8*
// CHECK: call void @_Block_object_dispose(i8* [[T0]], i32 8)
  __attribute__((objc_gc(weak))) __block Test2 *weakX = x;
  test2_helper(^{ [weakX destroy]; });
}
// rdar://problem/9124263
// In the test above, check that the use in the invocation function
// doesn't require a read barrier.
// CHECK-LABEL: define internal void @__test2_block_invoke
// CHECK: [[BLOCK:%.*]] = bitcast i8* {{%.*}} to [[BLOCK_T]]*
// CHECK-NOT: bitcast
// CHECK: [[T0:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5
// CHECK-NEXT: [[T1:%.*]] = load i8*, i8** [[T0]]
// CHECK-NEXT: [[T2:%.*]] = bitcast i8* [[T1]] to [[WEAK_T]]{{.*}}*
// CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds [[WEAK_T]]{{.*}}, [[WEAK_T]]{{.*}}* [[T2]], i32 0, i32 1
// CHECK-NEXT: [[T4:%.*]] = load [[WEAK_T]]{{.*}}*, [[WEAK_T]]{{.*}}** [[T3]]
// CHECK-NEXT: [[WEAKX:%.*]] = getelementptr inbounds [[WEAK_T]]{{.*}}, [[WEAK_T]]{{.*}}* [[T4]], i32 0, i32 6
// CHECK-NEXT: [[T0:%.*]] = load [[TEST2]]*, [[TEST2]]** [[WEAKX]], align 4
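
// As a purely illustrative aid (nothing the RUN or CHECK lines depend on), the
// __block "byref" structure that the test2 checks walk through corresponds
// roughly to the C layout below, assuming the standard __block byref ABI on
// i386. The struct and field names are hypothetical; the trailing comments
// give the getelementptr indices and values the CHECK lines above expect.
struct Block_byref_weakX_sketch {
  void *isa;                                    // index 0: inttoptr(1) marks a weak byref
  struct Block_byref_weakX_sketch *forwarding;  // index 1: initially points at the on-stack copy itself
  int flags;                                    // index 2: BLOCK_HAS_COPY_DISPOSE | BLOCK_BYREF_LAYOUT_UNRETAINED (1375731712)
  int size;                                     // index 3: 28 bytes (seven 4-byte fields on i386)
  void (*byref_keep)(void *dst, void *src);     // index 4: __Block_byref_object_copy_* helper
  void (*byref_dispose)(void *);                // index 5: __Block_byref_object_dispose_* helper
  Test2 *captured;                              // index 6: the captured object pointer
};
// The invoke-function checks above reach the captured pointer by first loading
// the forwarding pointer (index 1) and then indexing to the captured field
// (index 6).
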
// rdar://problem/12722954
// Make sure that the variadic ellipsis (...) is appropriately positioned in a block call.
void test3(void (^block)(int, ...)) {
  block(0, 1, 2, 3);
}
// CHECK-LABEL: define void @test3(
// CHECK: [[BLOCK:%.*]] = alloca void (i32, ...)*, align 4
// CHECK-NEXT: store void (i32, ...)*
// CHECK-NEXT: [[T0:%.*]] = load void (i32, ...)*, void (i32, ...)** [[BLOCK]], align 4
// CHECK-NEXT: [[T1:%.*]] = bitcast void (i32, ...)* [[T0]] to [[BLOCK_T:%.*]]*
// CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[T1]], i32 0, i32 3
// CHECK-NEXT: [[T3:%.*]] = bitcast [[BLOCK_T]]* [[T1]] to i8*
// CHECK-NEXT: [[T4:%.*]] = load i8*, i8** [[T2]]
// CHECK-NEXT: [[T5:%.*]] = bitcast i8* [[T4]] to void (i8*, i32, ...)*
// CHECK-NEXT: call void (i8*, i32, ...) [[T5]](i8* [[T3]], i32 0, i32 1, i32 2, i32 3)
// CHECK-NEXT: ret void
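
// A minimal, illustrative-only sketch of the calling convention the test3 (and
// test4) checks exercise, assuming the standard block-literal ABI: a block call
// treats the block pointer as the literal's header, loads the invoke pointer
// from field index 3, and passes the block itself as the implicit first
// argument ahead of the explicit ones. All names below are hypothetical and
// nothing in the RUN or CHECK lines refers to them.
struct Block_literal_sketch {
  void *isa;        // index 0: runtime class pointer (e.g. _NSConcreteStackBlock)
  int flags;        // index 1
  int reserved;     // index 2
  void *invoke;     // index 3: the function the checks above bind as [[T5]]
  void *descriptor; // index 4: size plus copy/dispose information
  // captured variables, if any, follow the header
};
static void call_variadic_block_sketch(void *blockAsVoidPointer) {
  struct Block_literal_sketch *literal =
      (struct Block_literal_sketch *)blockAsVoidPointer;
  // For test3 the fixed i32 parameter stays ahead of the ellipsis in the
  // invoke type, i.e. void (i8*, i32, ...); the varargs come last.
  void (*invoke)(void *, int, ...) =
      (void (*)(void *, int, ...))literal->invoke;
  invoke(literal, 0, 1, 2, 3);
}
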
void test4(void (^block)()) {
  block(0, 1, 2, 3);
}
// CHECK-LABEL: define void @test4(
// CHECK: [[BLOCK:%.*]] = alloca void (...)*, align 4
// CHECK-NEXT: store void (...)*
// CHECK-NEXT: [[T0:%.*]] = load void (...)*, void (...)** [[BLOCK]], align 4
// CHECK-NEXT: [[T1:%.*]] = bitcast void (...)* [[T0]] to [[BLOCK_T:%.*]]*
// CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds [[BLOCK_T]], [[BLOCK_T]]* [[T1]], i32 0, i32 3
// CHECK-NEXT: [[T3:%.*]] = bitcast [[BLOCK_T]]* [[T1]] to i8*
// CHECK-NEXT: [[T4:%.*]] = load i8*, i8** [[T2]]
// CHECK-NEXT: [[T5:%.*]] = bitcast i8* [[T4]] to void (i8*, i32, i32, i32, i32)*
// CHECK-NEXT: call void [[T5]](i8* [[T3]], i32 0, i32 1, i32 2, i32 3)
// CHECK-NEXT: ret void