; llvm-project/llvm/test/Transforms/ObjCARC/escape.ll
; RUN: opt -objc-arc -S < %s | FileCheck %s
; rdar://11229925
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
%struct.__block_byref_weakLogNTimes = type { i8*, %struct.__block_byref_weakLogNTimes*, i32, i32, i8*, i8*, void (...)* }
%struct.__block_descriptor = type { i64, i64 }
; Don't optimize away the retainBlock, because the object's address "escapes"
; with the objc_storeWeak call.
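;
; A minimal sketch of the pattern test0 exercises (the %raw, %blk, and %slot
; names below are illustrative only, not taken from this test):
;
;   %blk = call i8* @objc_retainBlock(i8* %raw), !clang.arc.copy_on_escape !0
;   %w   = call i8* @objc_storeWeak(i8** %slot, i8* %blk)   ; pointer published to a weak slot
;   ...
;   call void @objc_release(i8* %blk)
;
; Because objc_storeWeak makes the block reachable through the weak slot, the
; copy-on-escape retainBlock and its matching release must both stay.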
; CHECK-LABEL: define void @test0(
; CHECK: %tmp7 = call i8* @objc_retainBlock(i8* %tmp6) [[NUW:#[0-9]+]], !clang.arc.copy_on_escape !0
; CHECK: call void @objc_release(i8* %tmp7) [[NUW]], !clang.imprecise_release !0
; CHECK: }
define void @test0() nounwind {
entry:
%weakLogNTimes = alloca %struct.__block_byref_weakLogNTimes, align 8
%block = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, align 8
%byref.isa = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 0
store i8* null, i8** %byref.isa, align 8
%byref.forwarding = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 1
store %struct.__block_byref_weakLogNTimes* %weakLogNTimes, %struct.__block_byref_weakLogNTimes** %byref.forwarding, align 8
%byref.flags = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 2
store i32 33554432, i32* %byref.flags, align 8
%byref.size = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 3
store i32 48, i32* %byref.size, align 4
%tmp1 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 4
store i8* bitcast (void (i8*, i8*)* @__Block_byref_object_copy_ to i8*), i8** %tmp1, align 8
%tmp2 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 5
store i8* bitcast (void (i8*)* @__Block_byref_object_dispose_ to i8*), i8** %tmp2, align 8
%weakLogNTimes1 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 6
%tmp3 = bitcast void (...)** %weakLogNTimes1 to i8**
%tmp4 = call i8* @objc_initWeak(i8** %tmp3, i8* null) nounwind
%block.isa = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 0
store i8* null, i8** %block.isa, align 8
%block.flags = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 1
store i32 1107296256, i32* %block.flags, align 8
%block.reserved = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 2
store i32 0, i32* %block.reserved, align 4
%block.invoke = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 3
store i8* bitcast (void (i8*, i32)* @__main_block_invoke_0 to i8*), i8** %block.invoke, align 8
%block.descriptor = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 4
store %struct.__block_descriptor* null, %struct.__block_descriptor** %block.descriptor, align 8
%block.captured = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 5
%tmp5 = bitcast %struct.__block_byref_weakLogNTimes* %weakLogNTimes to i8*
store i8* %tmp5, i8** %block.captured, align 8
%tmp6 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block to i8*
%tmp7 = call i8* @objc_retainBlock(i8* %tmp6) nounwind, !clang.arc.copy_on_escape !0
%tmp8 = load %struct.__block_byref_weakLogNTimes** %byref.forwarding, align 8
%weakLogNTimes3 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %tmp8, i64 0, i32 6
%tmp9 = bitcast void (...)** %weakLogNTimes3 to i8**
%tmp10 = call i8* @objc_storeWeak(i8** %tmp9, i8* %tmp7) nounwind
%tmp11 = getelementptr inbounds i8* %tmp7, i64 16
%tmp12 = bitcast i8* %tmp11 to i8**
%tmp13 = load i8** %tmp12, align 8
%tmp14 = bitcast i8* %tmp13 to void (i8*, i32)*
call void %tmp14(i8* %tmp7, i32 10) nounwind, !clang.arc.no_objc_arc_exceptions !0
call void @objc_release(i8* %tmp7) nounwind, !clang.imprecise_release !0
call void @_Block_object_dispose(i8* %tmp5, i32 8) nounwind
call void @objc_destroyWeak(i8** %tmp3) nounwind
ret void
}
; Like test0, but the block pointer is passed to a plain function call rather
; than to objc_storeWeak, so the pass may optimize the retainBlock away.
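;
; Sketch of the only difference from test0 (illustrative names again; this
; assumes, per the comment above, that an ordinary call is not treated as an
; escape the way objc_storeWeak is):
;
;   %blk = call i8* @objc_retainBlock(i8* %raw), !clang.arc.copy_on_escape !0
;   %w   = call i8* @not_really_objc_storeWeak(i8** %slot, i8* %blk)   ; plain call, no weak slot
;
; Nothing publishes the block's address beyond the call, so the
; objc_retainBlock/objc_release pair can be deleted.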
; CHECK-LABEL: define void @test1(
; CHECK-NOT: @objc_retainBlock
; CHECK: }
define void @test1() nounwind {
entry:
%weakLogNTimes = alloca %struct.__block_byref_weakLogNTimes, align 8
%block = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, align 8
%byref.isa = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 0
store i8* null, i8** %byref.isa, align 8
%byref.forwarding = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 1
store %struct.__block_byref_weakLogNTimes* %weakLogNTimes, %struct.__block_byref_weakLogNTimes** %byref.forwarding, align 8
%byref.flags = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 2
store i32 33554432, i32* %byref.flags, align 8
%byref.size = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 3
store i32 48, i32* %byref.size, align 4
%tmp1 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 4
store i8* bitcast (void (i8*, i8*)* @__Block_byref_object_copy_ to i8*), i8** %tmp1, align 8
%tmp2 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 5
store i8* bitcast (void (i8*)* @__Block_byref_object_dispose_ to i8*), i8** %tmp2, align 8
%weakLogNTimes1 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 6
%tmp3 = bitcast void (...)** %weakLogNTimes1 to i8**
%tmp4 = call i8* @objc_initWeak(i8** %tmp3, i8* null) nounwind
%block.isa = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 0
store i8* null, i8** %block.isa, align 8
%block.flags = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 1
store i32 1107296256, i32* %block.flags, align 8
%block.reserved = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 2
store i32 0, i32* %block.reserved, align 4
%block.invoke = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 3
store i8* bitcast (void (i8*, i32)* @__main_block_invoke_0 to i8*), i8** %block.invoke, align 8
%block.descriptor = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 4
store %struct.__block_descriptor* null, %struct.__block_descriptor** %block.descriptor, align 8
%block.captured = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 5
%tmp5 = bitcast %struct.__block_byref_weakLogNTimes* %weakLogNTimes to i8*
store i8* %tmp5, i8** %block.captured, align 8
%tmp6 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block to i8*
%tmp7 = call i8* @objc_retainBlock(i8* %tmp6) nounwind, !clang.arc.copy_on_escape !0
%tmp8 = load %struct.__block_byref_weakLogNTimes** %byref.forwarding, align 8
%weakLogNTimes3 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %tmp8, i64 0, i32 6
%tmp9 = bitcast void (...)** %weakLogNTimes3 to i8**
%tmp10 = call i8* @not_really_objc_storeWeak(i8** %tmp9, i8* %tmp7) nounwind
%tmp11 = getelementptr inbounds i8* %tmp7, i64 16
%tmp12 = bitcast i8* %tmp11 to i8**
%tmp13 = load i8** %tmp12, align 8
%tmp14 = bitcast i8* %tmp13 to void (i8*, i32)*
call void %tmp14(i8* %tmp7, i32 10) nounwind, !clang.arc.no_objc_arc_exceptions !0
call void @objc_release(i8* %tmp7) nounwind, !clang.imprecise_release !0
call void @_Block_object_dispose(i8* %tmp5, i32 8) nounwind
call void @objc_destroyWeak(i8** %tmp3) nounwind
ret void
}
declare void @__Block_byref_object_copy_(i8*, i8*) nounwind
declare void @__Block_byref_object_dispose_(i8*) nounwind
declare void @objc_destroyWeak(i8**)
declare i8* @objc_initWeak(i8**, i8*)
declare void @__main_block_invoke_0(i8* nocapture, i32) nounwind ssp
declare void @_Block_object_dispose(i8*, i32)
declare i8* @objc_retainBlock(i8*)
declare i8* @objc_storeWeak(i8**, i8*)
declare i8* @not_really_objc_storeWeak(i8**, i8*)
declare void @objc_release(i8*)
!0 = !{}
; CHECK: attributes [[NUW]] = { nounwind }
; CHECK: attributes #1 = { nounwind ssp }