; RUN: opt %s -rewrite-statepoints-for-gc -S 2>&1 | FileCheck %s
declare void @foo()
declare void @use(...)
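; Two gc pointers (%obj and %obj2) are live across the statepoint below and
; flow into phis in later blocks; the rewritten function should use the
; relocated (and bitcast) values in those phis instead of the originals.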
define i64 addrspace(1)* @test1(i64 addrspace(1)* %obj, i64 addrspace(1)* %obj2, i1 %condition) gc "statepoint-example" {
entry:
; CHECK-LABEL: @test1
; CHECK-DAG: %obj.relocated
; CHECK-DAG: %obj2.relocated
%safepoint_token = call i32 (void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* @foo, i32 0, i32 0, i32 0, i32 0)
br label %joint
joint:
; CHECK-LABEL: joint:
; CHECK: %phi1 = phi i64 addrspace(1)* [ %obj.relocated.casted, %entry ], [ %obj3, %joint2 ]
%phi1 = phi i64 addrspace(1)* [ %obj, %entry ], [ %obj3, %joint2 ]
br i1 %condition, label %use, label %joint2
use:
br label %joint2
joint2:
; CHECK-LABEL: joint2:
; CHECK: %phi2 = phi i64 addrspace(1)* [ %obj.relocated.casted, %use ], [ %obj2.relocated.casted, %joint ]
; CHECK: %obj3 = getelementptr i64, i64 addrspace(1)* %obj2.relocated.casted, i32 1
%phi2 = phi i64 addrspace(1)* [ %obj, %use ], [ %obj2, %joint ]
%obj3 = getelementptr i64, i64 addrspace(1)* %obj2, i32 1
br label %joint
}
declare i64 addrspace(1)* @generate_obj()
declare void @consume_obj(i64 addrspace(1)*)
declare i1 @rt()
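; A gc pointer derived from %obj_init via getelementptr is live around a loop
; with a safepoint on the backedge; both the base and the derived value should
; be relocated and merged back in through phis at the loop header.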
define void @test2() gc "statepoint-example" {
; CHECK-LABEL: @test2
entry:
%obj_init = call i64 addrspace(1)* @generate_obj()
%obj = getelementptr i64, i64 addrspace(1)* %obj_init, i32 42
br label %loop
loop:
; CHECK: loop:
; CHECK-DAG: [ %obj_init.relocated.casted, %loop.backedge ]
; CHECK-DAG: [ %obj_init, %entry ]
; CHECK-DAG: [ %obj.relocated.casted, %loop.backedge ]
; CHECK-DAG: [ %obj, %entry ]
%index = phi i32 [ 0, %entry ], [ %index.inc, %loop.backedge ]
; CHECK-NOT: %location = getelementptr i64, i64 addrspace(1)* %obj, i32 %index
%location = getelementptr i64, i64 addrspace(1)* %obj, i32 %index
call void @consume_obj(i64 addrspace(1)* %location)
%index.inc = add i32 %index, 1
%condition = call i1 @rt()
br i1 %condition, label %loop_x, label %loop_y
loop_x:
br label %loop.backedge
loop.backedge:
%safepoint_token = call i32 (void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* @do_safepoint, i32 0, i32 0, i32 0, i32 0)
br label %loop
loop_y:
br label %loop.backedge
}
declare void @some_call(i8 addrspace(1)*)
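; %arg is relocated separately on the if and else paths; the two relocations
; must be merged by a single phi at the join block.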
define void @relocate_merge(i1 %cnd, i8 addrspace(1)* %arg) gc "statepoint-example" {
; CHECK-LABEL: @relocate_merge
bci_0:
br i1 %cnd, label %if_branch, label %else_branch
if_branch:
; CHECK-LABEL: if_branch:
; CHECK: gc.statepoint
; CHECK: gc.relocate
%safepoint_token = call i32 (void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* @foo, i32 0, i32 0, i32 0, i32 0)
br label %join
else_branch:
; CHECK-LABEL: else_branch:
; CHECK: gc.statepoint
; CHECK: gc.relocate
%safepoint_token1 = call i32 (void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* @foo, i32 0, i32 0, i32 0, i32 0)
br label %join
join:
; We need to end up with a single relocation phi updated from both paths
; CHECK-LABEL: join:
; CHECK: phi i8 addrspace(1)*
; CHECK-DAG: [ %arg.relocated, %if_branch ]
; CHECK-DAG: [ %arg.relocated4, %else_branch ]
; CHECK-NOT: phi
call void (i8 addrspace(1)*) @some_call(i8 addrspace(1)* %arg)
ret void
}
; Make sure a use in a statepoint gets properly relocated at a previous one.
; This is basically just making sure that statepoints aren't accidentally
; treated specially.
define void @test3(i64 addrspace(1)* %obj) gc "statepoint-example" {
entry:
; CHECK-LABEL: @test3
; CHECK: gc.statepoint
; CHECK-NEXT: gc.relocate
; CHECK-NEXT: bitcast
; CHECK-NEXT: gc.statepoint
%safepoint_token = call i32 (void (i64)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidi64f(void (i64)* undef, i32 1, i32 0, i64 undef, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
%safepoint_token1 = call i32 (i32 (i64 addrspace(1)*)*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i32p1i64f(i32 (i64 addrspace(1)*)* undef, i32 1, i32 0, i64 addrspace(1)* %obj, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
ret void
}
; Check specifically for the case where the result of a statepoint needs to
; be relocated itself
define void @test4() gc "statepoint-example" {
; CHECK-LABEL: @test4
; CHECK: gc.statepoint
; CHECK: gc.result
; CHECK: gc.statepoint
; CHECK: gc.relocate
; CHECK: @use(i8 addrspace(1)* %res.relocated)
%safepoint_token2 = tail call i32 (i8 addrspace(1)* ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_p1i8f(i8 addrspace(1)* ()* undef, i32 0, i32 0, i32 0, i32 0)
%res = call i8 addrspace(1)* @llvm.experimental.gc.result.ptr.p1i8(i32 %safepoint_token2)
call i32 (i8 addrspace(1)* ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_p1i8f(i8 addrspace(1)* ()* undef, i32 0, i32 0, i32 0, i32 0)
call void (...) @use(i8 addrspace(1)* %res)
unreachable
}
; Test updating a phi where not all inputs are live to begin with
define void @test5(i8 addrspace(1)* %arg) gc "statepoint-example" {
; CHECK-LABEL: test5
entry:
call i32 (i8 addrspace(1)* ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_p1i8f(i8 addrspace(1)* ()* undef, i32 0, i32 0, i32 0, i32 0)
switch i32 undef, label %kill [
i32 10, label %merge
i32 13, label %merge
]
kill:
br label %merge
merge:
; CHECK: merge:
; CHECK: %test = phi i8 addrspace(1)
; CHECK-DAG: [ null, %kill ]
; CHECK-DAG: [ %arg.relocated, %entry ]
; CHECK-DAG: [ %arg.relocated, %entry ]
%test = phi i8 addrspace(1)* [ null, %kill ], [ %arg, %entry ], [ %arg, %entry ]
call void (...) @use(i8 addrspace(1)* %test)
unreachable
}
; Check to make sure we handle values live over an entry statepoint
define void @test6(i8 addrspace(1)* %arg1, i8 addrspace(1)* %arg2,
i8 addrspace(1)* %arg3) gc "statepoint-example" {
; CHECK-LABEL: @test6
entry:
br i1 undef, label %gc.safepoint_poll.exit2, label %do_safepoint
do_safepoint:
; CHECK-LABEL: do_safepoint:
; CHECK: gc.statepoint
; CHECK: arg1.relocated =
; CHECK: arg2.relocated =
; CHECK: arg3.relocated =
call i32 (void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* @foo, i32 0, i32 0, i32 0, i32 3, i8 addrspace(1)* %arg1, i8 addrspace(1)* %arg2, i8 addrspace(1)* %arg3)
br label %gc.safepoint_poll.exit2
gc.safepoint_poll.exit2:
; CHECK-LABEL: gc.safepoint_poll.exit2:
; CHECK: phi i8 addrspace(1)*
; CHECK-DAG: [ %arg3, %entry ]
; CHECK-DAG: [ %arg3.relocated, %do_safepoint ]
; CHECK: phi i8 addrspace(1)*
; CHECK-DAG: [ %arg2, %entry ]
; CHECK-DAG: [ %arg2.relocated, %do_safepoint ]
; CHECK: phi i8 addrspace(1)*
; CHECK-DAG: [ %arg1, %entry ]
; CHECK-DAG: [ %arg1.relocated, %do_safepoint ]
call void (...) @use(i8 addrspace(1)* %arg1, i8 addrspace(1)* %arg2, i8 addrspace(1)* %arg3)
ret void
}
; Check relocation in a loop nest where a relocation happens in the outer
; but not the inner loop
define void @test_outer_loop(i8 addrspace(1)* %arg1, i8 addrspace(1)* %arg2,
i1 %cmp) gc "statepoint-example" {
; CHECK-LABEL: @test_outer_loop
bci_0:
br label %outer-loop
outer-loop:
; CHECK-LABEL: outer-loop:
; CHECK: phi i8 addrspace(1)* [ %arg2, %bci_0 ], [ %arg2.relocated, %outer-inc ]
; CHECK: phi i8 addrspace(1)* [ %arg1, %bci_0 ], [ %arg1.relocated, %outer-inc ]
br label %inner-loop
inner-loop:
br i1 %cmp, label %inner-loop, label %outer-inc
outer-inc:
; CHECK-LABEL: outer-inc:
; CHECK: %arg1.relocated
; CHECK: %arg2.relocated
%safepoint_token = call i32 (void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* @foo, i32 0, i32 0, i32 0, i32 2, i8 addrspace(1)* %arg1, i8 addrspace(1)* %arg2)
br label %outer-loop
}
; Check that both inner and outer loops get phis when relocation is in
; inner loop
define void @test_inner_loop(i8 addrspace(1)* %arg1, i8 addrspace(1)* %arg2,
i1 %cmp) gc "statepoint-example" {
; CHECK-LABEL: @test_inner_loop
bci_0:
br label %outer-loop
outer-loop:
; CHECK-LABEL: outer-loop:
; CHECK: phi i8 addrspace(1)* [ %arg2, %bci_0 ], [ %arg2.relocated, %outer-inc ]
; CHECK: phi i8 addrspace(1)* [ %arg1, %bci_0 ], [ %arg1.relocated, %outer-inc ]
br label %inner-loop
inner-loop:
; CHECK-LABEL: inner-loop
; CHECK: phi i8 addrspace(1)*
; CHECK-DAG: %outer-loop ]
; CHECK-DAG: [ %arg2.relocated, %inner-loop ]
; CHECK: phi i8 addrspace(1)*
; CHECK-DAG: %outer-loop ]
; CHECK-DAG: [ %arg1.relocated, %inner-loop ]
; CHECK: gc.statepoint
; CHECK: %arg1.relocated
; CHECK: %arg2.relocated
%safepoint_token = call i32 (void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* @foo, i32 0, i32 0, i32 0, i32 2, i8 addrspace(1)* %arg1, i8 addrspace(1)* %arg2)
br i1 %cmp, label %inner-loop, label %outer-inc
outer-inc:
; CHECK-LABEL: outer-inc:
br label %outer-loop
}
; This test shows why updating just those uses of the original value being
; relocated dominated by the inserted relocation is not always sufficient.
define i64 addrspace(1)* @test7(i64 addrspace(1)* %obj, i64 addrspace(1)* %obj2, i1 %condition) gc "statepoint-example" {
; CHECK-LABEL: @test7
entry:
br i1 %condition, label %branch2, label %join
branch2:
br i1 %condition, label %callbb, label %join2
callbb:
%safepoint_token = call i32 (void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* @foo, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
br label %join
join:
; CHECK-LABEL: join:
; CHECK: phi i64 addrspace(1)* [ %obj.relocated.casted, %callbb ], [ %obj, %entry ]
; CHECK: phi i64 addrspace(1)*
; CHECK-DAG: [ %obj, %entry ]
; CHECK-DAG: [ %obj2.relocated.casted, %callbb ]
; This is a phi outside the dominator region of the new defs inserted by
; the safepoint, BUT we can't stop the search here or we miss the second
; phi below.
%phi1 = phi i64 addrspace(1)* [ %obj, %entry ], [ %obj2, %callbb ]
br label %join2
join2:
; CHECK-LABEL: join2:
; CHECK: phi2 = phi i64 addrspace(1)*
; CHECK-DAG: %join ]
; CHECK-DAG: [ %obj2, %branch2 ]
%phi2 = phi i64 addrspace(1)* [ %obj, %join ], [ %obj2, %branch2 ]
ret i64 addrspace(1)* %phi2
}
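; Declarations of the helper functions and statepoint / gc.result intrinsics
; used by the tests above.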
declare void @do_safepoint()
declare i32 @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()*, i32, i32, ...)
declare i32 @llvm.experimental.gc.statepoint.p0f_p1i8f(i8 addrspace(1)* ()*, i32, i32, ...)
declare i32 @llvm.experimental.gc.statepoint.p0f_isVoidi64f(void (i64)*, i32, i32, ...)
declare i32 @llvm.experimental.gc.statepoint.p0f_i32p1i64f(i32 (i64 addrspace(1)*)*, i32, i32, ...)
declare i8 addrspace(1)* @llvm.experimental.gc.result.ptr.p1i8(i32) #3