// RUN: %clang_cc1 %s -triple x86_64-pc-win32 -fms-extensions -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 %s -triple i686-pc-win32 -fms-extensions -emit-llvm -o - | FileCheck %s
void abort(void) __attribute__((noreturn));
void might_crash(void);
void cleanup(void);
int check_condition(void);
void basic_finally(void) {
  __try {
    might_crash();
  } __finally {
    cleanup();
  }
}

// CHECK-LABEL: define void @basic_finally()
// CHECK: invoke void @might_crash()
// CHECK: to label %[[invoke_cont:[^ ]*]] unwind label %[[lpad:[^ ]*]]
//
// CHECK: [[invoke_cont]]
// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
// CHECK: call void @"\01?fin$0@0@basic_finally@@"({{i8( zeroext)?}} 0, i8* %[[fp]])
// CHECK-NEXT: ret void
//
// CHECK: [[lpad]]
// CHECK-NEXT: %[[pad:[^ ]*]] = cleanuppad
// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
// CHECK: call void @"\01?fin$0@0@basic_finally@@"({{i8( zeroext)?}} 1, i8* %[[fp]])
// CHECK-NEXT: cleanupret from %[[pad]] unwind to caller

// CHECK: define internal void @"\01?fin$0@0@basic_finally@@"({{.*}})
// CHECK: call void @cleanup()

// Mostly check that we don't double emit 'r' which would crash.
void decl_in_finally(void) {
  __try {
    might_crash();
  } __finally {
    int r;
  }
}
// Ditto, don't crash double emitting 'l'.
void label_in_finally(void) {
  __try {
    might_crash();
  } __finally {
  l:
    cleanup();
    if (check_condition())
      goto l;
  }
}

// CHECK-LABEL: define void @label_in_finally()
// CHECK: invoke void @might_crash()
// CHECK: to label %[[invoke_cont:[^ ]*]] unwind label %[[lpad:[^ ]*]]
//
// CHECK: [[invoke_cont]]
// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
// CHECK: call void @"\01?fin$0@0@label_in_finally@@"({{i8( zeroext)?}} 0, i8* %[[fp]])
// CHECK: ret void

// CHECK: define internal void @"\01?fin$0@0@label_in_finally@@"({{.*}})
// CHECK: br label %[[l:[^ ]*]]
//
// CHECK: [[l]]
// CHECK: call void @cleanup()
// CHECK: call i32 @check_condition()
// CHECK: br i1 {{.*}}, label
// CHECK: br label %[[l]]

int crashed;
void use_abnormal_termination(void) {
  __try {
    might_crash();
  } __finally {
    crashed = __abnormal_termination();
  }
}

// CHECK-LABEL: define void @use_abnormal_termination()
// CHECK: invoke void @might_crash()
// CHECK: to label %[[invoke_cont:[^ ]*]] unwind label %[[lpad:[^ ]*]]
//
// CHECK: [[invoke_cont]]
// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
// CHECK: call void @"\01?fin$0@0@use_abnormal_termination@@"({{i8( zeroext)?}} 0, i8* %[[fp]])
// CHECK: ret void
//
// CHECK: [[lpad]]
// CHECK-NEXT: %[[pad:[^ ]*]] = cleanuppad
// CHECK: %[[fp:[^ ]*]] = call i8* @llvm.localaddress()
// CHECK: call void @"\01?fin$0@0@use_abnormal_termination@@"({{i8( zeroext)?}} 1, i8* %[[fp]])
// CHECK-NEXT: cleanupret from %[[pad]] unwind to caller

// CHECK: define internal void @"\01?fin$0@0@use_abnormal_termination@@"({{i8( zeroext)?}} %[[abnormal:abnormal_termination]], i8* %frame_pointer)
// CHECK: %[[abnormal_zext:[^ ]*]] = zext i8 %[[abnormal]] to i32
// CHECK: store i32 %[[abnormal_zext]], i32* @crashed
// CHECK-NEXT: ret void

void noreturn_noop_finally() {
  __try {
    __noop();
  } __finally {
    abort();
  }
}

// CHECK-LABEL: define void @noreturn_noop_finally()
// CHECK: call void @"\01?fin$0@0@noreturn_noop_finally@@"({{.*}})
// CHECK: ret void

// CHECK: define internal void @"\01?fin$0@0@noreturn_noop_finally@@"({{.*}})
// CHECK: call void @abort()
// CHECK: unreachable

void noreturn_finally() {
  __try {
    might_crash();
  } __finally {
    abort();
  }
}

// CHECK-LABEL: define void @noreturn_finally()
// CHECK: invoke void @might_crash()
// CHECK: to label %[[cont:[^ ]*]] unwind label %[[lpad:[^ ]*]]
//
// CHECK: [[cont]]
// CHECK: call void @"\01?fin$0@0@noreturn_finally@@"({{.*}})
// CHECK: ret void
//
// CHECK: [[lpad]]
// CHECK-NEXT: %[[pad:[^ ]*]] = cleanuppad
// CHECK: call void @"\01?fin$0@0@noreturn_finally@@"({{.*}})
// CHECK-NEXT: cleanupret from %[[pad]] unwind to caller

// CHECK: define internal void @"\01?fin$0@0@noreturn_finally@@"({{.*}})
// CHECK: call void @abort()
// CHECK: unreachable

int finally_with_return() {
  __try {
    return 42;
  } __finally {
  }
}

// CHECK-LABEL: define i32 @finally_with_return()
// CHECK: call void @"\01?fin$0@0@finally_with_return@@"({{.*}})
// CHECK-NEXT: ret i32 42

// CHECK: define internal void @"\01?fin$0@0@finally_with_return@@"({{.*}})
// CHECK-NOT: br i1
// CHECK-NOT: br label
// CHECK: ret void

int nested___finally___finally() {
  __try {
    __try {
    } __finally {
      return 1;
    }
  } __finally {
    // Intentionally no return here.
  }
  return 0;
}

// CHECK-LABEL: define i32 @nested___finally___finally
// CHECK: invoke void @"\01?fin$1@0@nested___finally___finally@@"({{.*}})
// CHECK: to label %[[outercont:[^ ]*]] unwind label %[[lpad:[^ ]*]]
//
// CHECK: [[outercont]]
// CHECK: call void @"\01?fin$0@0@nested___finally___finally@@"({{.*}})
// CHECK-NEXT: ret i32 0
//
// CHECK: [[lpad]]
// CHECK-NEXT: %[[pad:[^ ]*]] = cleanuppad
// CHECK: call void @"\01?fin$0@0@nested___finally___finally@@"({{.*}})
// CHECK-NEXT: cleanupret from %[[pad]] unwind to caller

// CHECK-LABEL: define internal void @"\01?fin$0@0@nested___finally___finally@@"({{.*}})
// CHECK: ret void

// CHECK-LABEL: define internal void @"\01?fin$1@0@nested___finally___finally@@"({{.*}})
// CHECK: unreachable

// FIXME: Our behavior seems suspiciously different.
int nested___finally___finally_with_eh_edge() {
  __try {
    __try {
      might_crash();
    } __finally {
      return 899;
    }
  } __finally {
    // Intentionally no return here.
  }
  return 912;
}

// CHECK-LABEL: define i32 @nested___finally___finally_with_eh_edge
// CHECK: invoke void @might_crash()
// CHECK-NEXT: to label %[[invokecont:[^ ]*]] unwind label %[[lpad1:[^ ]*]]
//
// [[invokecont]]
// CHECK: invoke void @"\01?fin$1@0@nested___finally___finally_with_eh_edge@@"({{.*}})
// CHECK-NEXT: to label %[[outercont:[^ ]*]] unwind label %[[lpad2:[^ ]*]]
//
// CHECK: [[outercont]]
// CHECK: call void @"\01?fin$0@0@nested___finally___finally_with_eh_edge@@"({{.*}})
// CHECK-NEXT: ret i32 912
//
// CHECK: [[lpad1]]
// CHECK-NEXT: %[[innerpad:[^ ]*]] = cleanuppad
// CHECK: invoke void @"\01?fin$1@0@nested___finally___finally_with_eh_edge@@"({{.*}})
// CHECK-NEXT: label %[[innercleanupretbb:[^ ]*]] unwind label %[[lpad2:[^ ]*]]
//
// CHECK: [[innercleanupretbb]]
// CHECK-NEXT: cleanupret from %[[innerpad]] unwind label %[[lpad2]]
//
// CHECK: [[lpad2]]
// CHECK-NEXT: %[[outerpad:[^ ]*]] = cleanuppad
// CHECK: call void @"\01?fin$0@0@nested___finally___finally_with_eh_edge@@"({{.*}})
// CHECK-NEXT: cleanupret from %[[outerpad]] unwind to caller

// CHECK-LABEL: define internal void @"\01?fin$0@0@nested___finally___finally_with_eh_edge@@"({{.*}})
// CHECK: ret void

// CHECK-LABEL: define internal void @"\01?fin$1@0@nested___finally___finally_with_eh_edge@@"({{.*}})
// CHECK: unreachable