//===--- CGException.cpp - Emit LLVM Code for C++ exceptions ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ exception related code generation.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGObjCRuntime.h"
#include "TargetInfo.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace CodeGen;

static llvm::Constant *getFreeExceptionFn(CodeGenModule &CGM) {
  // void __cxa_free_exception(void *thrown_exception);

  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_free_exception");
}

static llvm::Constant *getUnexpectedFn(CodeGenModule &CGM) {
  // void __cxa_call_unexpected(void *thrown_exception);

  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_call_unexpected");
}
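
// For reference (a sketch of the expected output, nothing is defined here):
// both helpers above only materialize runtime declarations, e.g.
// getFreeExceptionFn yields roughly
//   declare void @__cxa_free_exception(i8*)
// in the emitted module.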

llvm::Constant *CodeGenModule::getTerminateFn() {
  // void __terminate();

  llvm::FunctionType *FTy =
      llvm::FunctionType::get(VoidTy, /*IsVarArgs=*/false);

  StringRef name;

  // In C++, use std::terminate().
  if (getLangOpts().CPlusPlus &&
      getTarget().getCXXABI().isItaniumFamily()) {
    name = "_ZSt9terminatev";
  } else if (getLangOpts().CPlusPlus &&
             getTarget().getCXXABI().isMicrosoft()) {
    if (getLangOpts().isCompatibleWithMSVC(LangOptions::MSVC2015))
      name = "__std_terminate";
    else
      name = "\01?terminate@@YAXXZ";
  } else if (getLangOpts().ObjC1 &&
             getLangOpts().ObjCRuntime.hasTerminate())
    name = "objc_terminate";
  else
    name = "abort";
  return CreateRuntimeFunction(FTy, name);
}
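
// Note on the MSVC name above: the "\01" prefix tells LLVM to take the
// symbol literally, so the already-mangled "?terminate@@YAXXZ" is not
// mangled a second time when it is emitted.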

static llvm::Constant *getCatchallRethrowFn(CodeGenModule &CGM,
                                            StringRef Name) {
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);

  return CGM.CreateRuntimeFunction(FTy, Name);
}

const EHPersonality EHPersonality::GNU_C = { "__gcc_personality_v0", nullptr };
const EHPersonality
EHPersonality::GNU_C_SJLJ = { "__gcc_personality_sj0", nullptr };
const EHPersonality
EHPersonality::GNU_C_SEH = { "__gcc_personality_seh0", nullptr };
const EHPersonality
EHPersonality::NeXT_ObjC = { "__objc_personality_v0", nullptr };
const EHPersonality
EHPersonality::GNU_CPlusPlus = { "__gxx_personality_v0", nullptr };
const EHPersonality
EHPersonality::GNU_CPlusPlus_SJLJ = { "__gxx_personality_sj0", nullptr };
const EHPersonality
EHPersonality::GNU_CPlusPlus_SEH = { "__gxx_personality_seh0", nullptr };
const EHPersonality
EHPersonality::GNU_ObjC = {"__gnu_objc_personality_v0", "objc_exception_throw"};
const EHPersonality
EHPersonality::GNU_ObjC_SJLJ = {"__gnu_objc_personality_sj0", "objc_exception_throw"};
const EHPersonality
EHPersonality::GNU_ObjC_SEH = {"__gnu_objc_personality_seh0", "objc_exception_throw"};
const EHPersonality
EHPersonality::GNU_ObjCXX = { "__gnustep_objcxx_personality_v0", nullptr };
const EHPersonality
EHPersonality::GNUstep_ObjC = { "__gnustep_objc_personality_v0", nullptr };
const EHPersonality
EHPersonality::MSVC_except_handler = { "_except_handler3", nullptr };
const EHPersonality
EHPersonality::MSVC_C_specific_handler = { "__C_specific_handler", nullptr };
const EHPersonality
EHPersonality::MSVC_CxxFrameHandler3 = { "__CxxFrameHandler3", nullptr };
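
// For orientation only (a sketch, not part of the emission logic): the
// symbols above end up referenced from landingpads, in the older IR syntax
// roughly as:
//
//   %ehvals = landingpad { i8*, i32 }
//                personality i32 (...)* @__C_specific_handler
//              catch i8* bitcast (i32 (i8*, i8*)* @filter to i8*)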

/// On Win64, use libgcc's SEH personality function. We fall back to dwarf on
/// other platforms, unless the user asked for SjLj exceptions.
static bool useLibGCCSEHPersonality(const llvm::Triple &T) {
  return T.isOSWindows() && T.getArch() == llvm::Triple::x86_64;
}

static const EHPersonality &getCPersonality(const llvm::Triple &T,
                                            const LangOptions &L) {
  if (L.SjLjExceptions)
    return EHPersonality::GNU_C_SJLJ;
  else if (useLibGCCSEHPersonality(T))
    return EHPersonality::GNU_C_SEH;
  return EHPersonality::GNU_C;
}

static const EHPersonality &getObjCPersonality(const llvm::Triple &T,
                                               const LangOptions &L) {
  switch (L.ObjCRuntime.getKind()) {
  case ObjCRuntime::FragileMacOSX:
    return getCPersonality(T, L);
  case ObjCRuntime::MacOSX:
  case ObjCRuntime::iOS:
  case ObjCRuntime::WatchOS:
    return EHPersonality::NeXT_ObjC;
  case ObjCRuntime::GNUstep:
    if (L.ObjCRuntime.getVersion() >= VersionTuple(1, 7))
      return EHPersonality::GNUstep_ObjC;
    // fallthrough
  case ObjCRuntime::GCC:
  case ObjCRuntime::ObjFW:
    if (L.SjLjExceptions)
      return EHPersonality::GNU_ObjC_SJLJ;
    else if (useLibGCCSEHPersonality(T))
      return EHPersonality::GNU_ObjC_SEH;
    return EHPersonality::GNU_ObjC;
  }
  llvm_unreachable("bad runtime kind");
}

static const EHPersonality &getCXXPersonality(const llvm::Triple &T,
                                              const LangOptions &L) {
  if (L.SjLjExceptions)
    return EHPersonality::GNU_CPlusPlus_SJLJ;
  else if (useLibGCCSEHPersonality(T))
    return EHPersonality::GNU_CPlusPlus_SEH;
  return EHPersonality::GNU_CPlusPlus;
}

/// Determines the personality function to use when both C++
/// and Objective-C exceptions are being caught.
static const EHPersonality &getObjCXXPersonality(const llvm::Triple &T,
                                                 const LangOptions &L) {
  switch (L.ObjCRuntime.getKind()) {
  // The ObjC personality defers to the C++ personality for non-ObjC
  // handlers. Unlike the C++ case, we use the same personality
  // function on targets using (backend-driven) SJLJ EH.
  case ObjCRuntime::MacOSX:
  case ObjCRuntime::iOS:
  case ObjCRuntime::WatchOS:
    return EHPersonality::NeXT_ObjC;

  // In the fragile ABI, just use C++ exception handling and hope
  // they're not doing crazy exception mixing.
  case ObjCRuntime::FragileMacOSX:
    return getCXXPersonality(T, L);

  // The GCC runtime's personality function inherently doesn't support
  // mixed EH. Use the C++ personality just to avoid returning null.
  case ObjCRuntime::GCC:
  case ObjCRuntime::ObjFW:
    return getObjCPersonality(T, L);
  case ObjCRuntime::GNUstep:
    return EHPersonality::GNU_ObjCXX;
  }
  llvm_unreachable("bad runtime kind");
}

static const EHPersonality &getSEHPersonalityMSVC(const llvm::Triple &T) {
  if (T.getArch() == llvm::Triple::x86)
    return EHPersonality::MSVC_except_handler;
  return EHPersonality::MSVC_C_specific_handler;
}

const EHPersonality &EHPersonality::get(CodeGenModule &CGM,
                                        const FunctionDecl *FD) {
  const llvm::Triple &T = CGM.getTarget().getTriple();
  const LangOptions &L = CGM.getLangOpts();

  // Functions using SEH get an SEH personality.
  if (FD && FD->usesSEHTry())
    return getSEHPersonalityMSVC(T);

  // Try to pick a personality function that is compatible with MSVC if we're
  // not compiling Obj-C. Obj-C users better have an Obj-C runtime that supports
  // the GCC-style personality function.
  if (T.isWindowsMSVCEnvironment() && !L.ObjC1) {
    if (L.SjLjExceptions)
      return EHPersonality::GNU_CPlusPlus_SJLJ;
    else
      return EHPersonality::MSVC_CxxFrameHandler3;
  }

  if (L.CPlusPlus && L.ObjC1)
    return getObjCXXPersonality(T, L);
  else if (L.CPlusPlus)
    return getCXXPersonality(T, L);
  else if (L.ObjC1)
    return getObjCPersonality(T, L);
  else
    return getCPersonality(T, L);
}
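
// Illustrative summary (a sketch, not exhaustive): for a plain C++
// translation unit the selection above typically resolves to:
//   x86_64 Linux (Itanium)      -> __gxx_personality_v0
//   x86_64 MinGW (libgcc SEH)   -> __gxx_personality_seh0
//   Windows MSVC environments   -> __CxxFrameHandler3
// with -fsjlj-exceptions forcing the *_sj0 variants instead.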

const EHPersonality &EHPersonality::get(CodeGenFunction &CGF) {
  return get(CGF.CGM, dyn_cast_or_null<FunctionDecl>(CGF.CurCodeDecl));
}

static llvm::Constant *getPersonalityFn(CodeGenModule &CGM,
                                        const EHPersonality &Personality) {
  return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty, true),
                                   Personality.PersonalityFn,
                                   llvm::AttributeList(), /*Local=*/true);
}

static llvm::Constant *getOpaquePersonalityFn(CodeGenModule &CGM,
                                              const EHPersonality &Personality) {
  llvm::Constant *Fn = getPersonalityFn(CGM, Personality);
  return llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
}

/// Check whether a landingpad instruction only uses C++ features.
static bool LandingPadHasOnlyCXXUses(llvm::LandingPadInst *LPI) {
  for (unsigned I = 0, E = LPI->getNumClauses(); I != E; ++I) {
    // Look for something that would've been returned by the ObjC
    // runtime's GetEHType() method.
    llvm::Value *Val = LPI->getClause(I)->stripPointerCasts();
    if (LPI->isCatch(I)) {
      // Check if the catch value has the ObjC prefix.
      if (llvm::GlobalVariable *GV = dyn_cast<llvm::GlobalVariable>(Val))
        // ObjC EH selector entries are always global variables with
        // names starting like this.
        if (GV->getName().startswith("OBJC_EHTYPE"))
          return false;
    } else {
      // Check if any of the filter values have the ObjC prefix.
      llvm::Constant *CVal = cast<llvm::Constant>(Val);
      for (llvm::User::op_iterator
             II = CVal->op_begin(), IE = CVal->op_end(); II != IE; ++II) {
        if (llvm::GlobalVariable *GV =
              cast<llvm::GlobalVariable>((*II)->stripPointerCasts()))
          // ObjC EH selector entries are always global variables with
          // names starting like this.
          if (GV->getName().startswith("OBJC_EHTYPE"))
            return false;
      }
    }
  }
  return true;
}

/// Check whether a personality function could reasonably be swapped
/// for a C++ personality function.
static bool PersonalityHasOnlyCXXUses(llvm::Constant *Fn) {
  for (llvm::User *U : Fn->users()) {
    // Conditionally white-list bitcasts.
    if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(U)) {
      if (CE->getOpcode() != llvm::Instruction::BitCast) return false;
      if (!PersonalityHasOnlyCXXUses(CE))
        return false;
      continue;
    }

    // Otherwise it must be a function.
    llvm::Function *F = dyn_cast<llvm::Function>(U);
    if (!F) return false;

    for (auto BB = F->begin(), E = F->end(); BB != E; ++BB) {
      if (BB->isLandingPad())
        if (!LandingPadHasOnlyCXXUses(BB->getLandingPadInst()))
          return false;
    }
  }

  return true;
}

/// Try to use the C++ personality function in ObjC++. Not doing this
/// can cause some incompatibilities with gcc, which is more
/// aggressive about only using the ObjC++ personality in a function
/// when it really needs it.
void CodeGenModule::SimplifyPersonality() {
  // If we're not in ObjC++ -fexceptions, there's nothing to do.
  if (!LangOpts.CPlusPlus || !LangOpts.ObjC1 || !LangOpts.Exceptions)
    return;

  // Both the problem this endeavors to fix and the way the logic
  // above works is specific to the NeXT runtime.
  if (!LangOpts.ObjCRuntime.isNeXTFamily())
    return;

  const EHPersonality &ObjCXX = EHPersonality::get(*this, /*FD=*/nullptr);
  const EHPersonality &CXX =
      getCXXPersonality(getTarget().getTriple(), LangOpts);
  if (&ObjCXX == &CXX)
    return;

  assert(std::strcmp(ObjCXX.PersonalityFn, CXX.PersonalityFn) != 0 &&
         "Different EHPersonalities using the same personality function.");

  llvm::Function *Fn = getModule().getFunction(ObjCXX.PersonalityFn);

  // Nothing to do if it's unused.
  if (!Fn || Fn->use_empty()) return;

  // Can't do the optimization if it has non-C++ uses.
  if (!PersonalityHasOnlyCXXUses(Fn)) return;

  // Create the C++ personality function and kill off the old
  // function.
  llvm::Constant *CXXFn = getPersonalityFn(*this, CXX);

  // This can happen if the user is screwing with us.
  if (Fn->getType() != CXXFn->getType()) return;

  Fn->replaceAllUsesWith(CXXFn);
  Fn->eraseFromParent();
}
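
// Sketch of the effect (assumed example): an ObjC++ function whose
// landingpads never reference OBJC_EHTYPE_* globals initially names the
// ObjC personality, e.g.
//   personality i32 (...)* @__objc_personality_v0
// and after this rewrite refers to @__gxx_personality_v0 instead, matching
// gcc's more selective choice of personality.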

/// Returns the value to inject into a selector to indicate the
/// presence of a catch-all.
static llvm::Constant *getCatchAllValue(CodeGenFunction &CGF) {
  // Possibly we should use @llvm.eh.catch.all.value here.
  return llvm::ConstantPointerNull::get(CGF.Int8PtrTy);
}

namespace {
  /// A cleanup to free the exception object if its initialization
  /// throws.
  struct FreeException final : EHScopeStack::Cleanup {
    llvm::Value *exn;
    FreeException(llvm::Value *exn) : exn(exn) {}
    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitNounwindRuntimeCall(getFreeExceptionFn(CGF.CGM), exn);
    }
  };
} // end anonymous namespace
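
// Usage note (assumed scenario): this cleanup only fires on the exceptional
// path. For `throw T(...)` where T's constructor itself throws while the
// exception object is being initialized, __cxa_free_exception reclaims the
// allocated storage before the inner exception propagates.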

// Emits an exception expression into the given location. This
// differs from EmitAnyExprToMem only in that, if a final copy-ctor
// call is required, an exception within that copy ctor causes
// std::terminate to be invoked.
void CodeGenFunction::EmitAnyExprToExn(const Expr *e, Address addr) {
  // Make sure the exception object is cleaned up if there's an
  // exception during initialization.
  pushFullExprCleanup<FreeException>(EHCleanup, addr.getPointer());
  EHScopeStack::stable_iterator cleanup = EHStack.stable_begin();

  // __cxa_allocate_exception returns a void*; we need to cast this
  // to the appropriate type for the object.
  llvm::Type *ty = ConvertTypeForMem(e->getType())->getPointerTo();
  Address typedAddr = Builder.CreateBitCast(addr, ty);

  // FIXME: this isn't quite right! If there's a final unelided call
  // to a copy constructor, then according to [except.terminate]p1 we
  // must call std::terminate() if that constructor throws, because
  // technically that copy occurs after the exception expression is
  // evaluated but before the exception is caught. But the best way
  // to handle that is to teach EmitAggExpr to do the final copy
  // differently if it can't be elided.
  EmitAnyExprToMem(e, typedAddr, e->getType().getQualifiers(),
                   /*IsInit*/ true);

  // Deactivate the cleanup block.
  DeactivateCleanupBlock(cleanup,
                         cast<llvm::Instruction>(typedAddr.getPointer()));
}
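
// A rough sketch of what the Itanium path produces for `throw 42;` (the
// allocation and the __cxa_throw call are emitted by the C++ ABI class;
// only the store in the middle comes from EmitAnyExprToExn):
//
//   %exn = call i8* @__cxa_allocate_exception(i64 4)
//   %0 = bitcast i8* %exn to i32*
//   store i32 42, i32* %0
//   call void @__cxa_throw(i8* %exn, i8* bitcast (i8** @_ZTIi to i8*),
//                          i8* null)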

Address CodeGenFunction::getExceptionSlot() {
  if (!ExceptionSlot)
    ExceptionSlot = CreateTempAlloca(Int8PtrTy, "exn.slot");
  return Address(ExceptionSlot, getPointerAlign());
}

Address CodeGenFunction::getEHSelectorSlot() {
  if (!EHSelectorSlot)
    EHSelectorSlot = CreateTempAlloca(Int32Ty, "ehselector.slot");
  return Address(EHSelectorSlot, CharUnits::fromQuantity(4));
}
|
|
|
|
|
2011-09-16 02:57:19 +08:00
|
|
|
llvm::Value *CodeGenFunction::getExceptionFromSlot() {
|
|
|
|
return Builder.CreateLoad(getExceptionSlot(), "exn");
|
|
|
|
}
|
|
|
|
|
|
|
|
llvm::Value *CodeGenFunction::getSelectorFromSlot() {
|
|
|
|
return Builder.CreateLoad(getEHSelectorSlot(), "sel");
|
|
|
|
}
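A minimal usage sketch (hypothetical caller, illustrative only): handler
code reloads both values through the helpers above rather than keeping
SSA values live across the landing pad; the i8* exception slot itself is
created by getExceptionSlot(), which is not shown in this excerpt.

// C++ code (illustrative):
llvm::Value *Exn = CGF.getExceptionFromSlot();  // load from the i8* exception slot
llvm::Value *Sel = CGF.getSelectorFromSlot();   // load i32 from "ehselector.slot"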
|
|
|
|
|
2013-05-08 05:53:22 +08:00
|
|
|
void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E,
|
|
|
|
bool KeepInsertionPoint) {
|
2015-03-05 08:46:22 +08:00
|
|
|
if (const Expr *SubExpr = E->getSubExpr()) {
|
|
|
|
QualType ThrowType = SubExpr->getType();
|
|
|
|
if (ThrowType->isObjCObjectPointerType()) {
|
|
|
|
const Stmt *ThrowStmt = E->getSubExpr();
|
|
|
|
const ObjCAtThrowStmt S(E->getExprLoc(), const_cast<Stmt *>(ThrowStmt));
|
|
|
|
CGM.getObjCRuntime().EmitThrowStmt(*this, S, false);
|
|
|
|
} else {
|
|
|
|
CGM.getCXXABI().emitThrow(*this, E);
|
2010-04-22 09:10:34 +08:00
|
|
|
}
|
2015-03-05 08:46:22 +08:00
|
|
|
} else {
|
|
|
|
CGM.getCXXABI().emitRethrow(*this, /*isNoReturn=*/true);
|
2010-04-22 09:10:34 +08:00
|
|
|
}
|
2009-12-10 08:06:18 +08:00
|
|
|
|
2011-01-12 11:41:02 +08:00
|
|
|
// throw is an expression, and the expression emitters expect us
|
|
|
|
// to leave ourselves at a valid insertion point.
|
2013-05-08 05:53:22 +08:00
|
|
|
if (KeepInsertionPoint)
|
|
|
|
EmitBlock(createBasicBlock("throw.cont"));
|
2009-10-30 09:42:31 +08:00
|
|
|
}
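Illustratively (hypothetical source, not from the tree), the three paths
above correspond to:

// C++ code:
void f() {
  throw 42;   // subexpression present: CGM.getCXXABI().emitThrow(*this, E)
}
void g() {
  throw;      // no subexpression: emitRethrow(*this, /*isNoReturn=*/true)
}
// In Objective-C++, a throw whose operand has ObjC object pointer type
// is instead routed through CGM.getObjCRuntime().EmitThrowStmt.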
|
2009-11-21 07:44:51 +08:00
|
|
|
|
2009-12-08 07:38:24 +08:00
|
|
|
void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
|
2012-03-11 15:00:24 +08:00
|
|
|
if (!CGM.getLangOpts().CXXExceptions)
|
2010-02-07 07:59:05 +08:00
|
|
|
return;
|
|
|
|
|
2009-12-08 07:38:24 +08:00
|
|
|
const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
|
2014-05-21 13:09:00 +08:00
|
|
|
if (!FD) {
|
2014-05-06 18:08:46 +08:00
|
|
|
// If the CapturedDecl is nothrow, create a terminate scope for it.
|
|
|
|
if (const CapturedDecl* CD = dyn_cast_or_null<CapturedDecl>(D)) {
|
|
|
|
if (CD->isNothrow())
|
|
|
|
EHStack.pushTerminate();
|
|
|
|
}
|
2009-12-08 07:38:24 +08:00
|
|
|
return;
|
2014-05-06 18:08:46 +08:00
|
|
|
}
|
2009-12-08 07:38:24 +08:00
|
|
|
const FunctionProtoType *Proto = FD->getType()->getAs<FunctionProtoType>();
|
2014-05-21 13:09:00 +08:00
|
|
|
if (!Proto)
|
2009-12-08 07:38:24 +08:00
|
|
|
return;
|
|
|
|
|
2011-03-16 02:42:48 +08:00
|
|
|
ExceptionSpecificationType EST = Proto->getExceptionSpecType();
|
|
|
|
if (isNoexceptExceptionSpec(EST)) {
|
|
|
|
if (Proto->getNoexceptSpec(getContext()) == FunctionProtoType::NR_Nothrow) {
|
|
|
|
// noexcept functions are simple terminate scopes.
|
|
|
|
EHStack.pushTerminate();
|
|
|
|
}
|
|
|
|
} else if (EST == EST_Dynamic || EST == EST_DynamicNone) {
|
2015-04-01 12:45:52 +08:00
|
|
|
// TODO: Revisit exception specifications for the MS ABI. There is a way to
|
|
|
|
// encode these in an object file, but MSVC doesn't do anything with them.
|
|
|
|
if (getTarget().getCXXABI().isMicrosoft())
|
|
|
|
return;
|
2011-03-16 02:42:48 +08:00
|
|
|
unsigned NumExceptions = Proto->getNumExceptions();
|
|
|
|
EHFilterScope *Filter = EHStack.pushFilter(NumExceptions);
|
|
|
|
|
|
|
|
for (unsigned I = 0; I != NumExceptions; ++I) {
|
|
|
|
QualType Ty = Proto->getExceptionType(I);
|
|
|
|
QualType ExceptType = Ty.getNonReferenceType().getUnqualifiedType();
|
|
|
|
llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType,
|
|
|
|
/*ForEH=*/true);
|
|
|
|
Filter->setFilter(I, EHType);
|
|
|
|
}
|
2009-12-08 07:38:24 +08:00
|
|
|
}
|
|
|
|
}
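As a sketch (hypothetical declarations), the scopes pushed above map onto
exception specifications like so:

// C++ code:
void a() noexcept;           // NR_Nothrow: EHStack.pushTerminate()
void b() throw(int, float);  // EST_Dynamic: pushFilter() with two RTTI entries
void c() throw();            // EST_DynamicNone: pushFilter() with no entries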
|
|
|
|
|
2011-08-11 10:22:43 +08:00
|
|
|
/// Emit the dispatch block for a filter scope if necessary.
|
|
|
|
static void emitFilterDispatchBlock(CodeGenFunction &CGF,
|
|
|
|
EHFilterScope &filterScope) {
|
|
|
|
llvm::BasicBlock *dispatchBlock = filterScope.getCachedEHDispatchBlock();
|
|
|
|
if (!dispatchBlock) return;
|
|
|
|
if (dispatchBlock->use_empty()) {
|
|
|
|
delete dispatchBlock;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
CGF.EmitBlockAfterUses(dispatchBlock);
|
|
|
|
|
|
|
|
// If this isn't a catch-all filter, we need to check whether we got
|
|
|
|
// here because the filter triggered.
|
|
|
|
if (filterScope.getNumFilters()) {
|
|
|
|
// Load the selector value.
|
2011-09-16 02:57:19 +08:00
|
|
|
llvm::Value *selector = CGF.getSelectorFromSlot();
|
2011-08-11 10:22:43 +08:00
|
|
|
llvm::BasicBlock *unexpectedBB = CGF.createBasicBlock("ehspec.unexpected");
|
|
|
|
|
|
|
|
llvm::Value *zero = CGF.Builder.getInt32(0);
|
|
|
|
llvm::Value *failsFilter =
|
2015-02-12 06:33:32 +08:00
|
|
|
CGF.Builder.CreateICmpSLT(selector, zero, "ehspec.fails");
|
|
|
|
CGF.Builder.CreateCondBr(failsFilter, unexpectedBB,
|
|
|
|
CGF.getEHResumeBlock(false));
|
2011-08-11 10:22:43 +08:00
|
|
|
|
|
|
|
CGF.EmitBlock(unexpectedBB);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Call __cxa_call_unexpected. This doesn't need to be an invoke
|
|
|
|
// because __cxa_call_unexpected magically filters exceptions
|
|
|
|
// according to the last landing pad the exception was thrown
|
|
|
|
// into. Seriously.
|
2011-09-16 02:57:19 +08:00
|
|
|
llvm::Value *exn = CGF.getExceptionFromSlot();
|
2013-03-01 03:01:20 +08:00
|
|
|
CGF.EmitRuntimeCall(getUnexpectedFn(CGF.CGM), exn)
|
2011-08-11 10:22:43 +08:00
|
|
|
->setDoesNotReturn();
|
|
|
|
CGF.Builder.CreateUnreachable();
|
|
|
|
}
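The dispatch emitted above has roughly this shape; the block and value
names are the ones created in this function, the rest is illustrative.
The Itanium personality reports a filter mismatch as a negative selector,
hence the signed comparison against zero.

; LLVM IR (illustrative):
filter.dispatch:
  %sel = load i32, i32* %ehselector.slot
  %ehspec.fails = icmp slt i32 %sel, 0
  br i1 %ehspec.fails, label %ehspec.unexpected, label %eh.resume
ehspec.unexpected:
  call void @__cxa_call_unexpected(i8* %exn)  ; marked noreturn above
  unreachable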
|
|
|
|
|
2009-12-08 07:38:24 +08:00
|
|
|
void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
|
2012-03-11 15:00:24 +08:00
|
|
|
if (!CGM.getLangOpts().CXXExceptions)
|
2010-02-07 07:59:05 +08:00
|
|
|
return;
|
|
|
|
|
2009-12-08 07:38:24 +08:00
|
|
|
const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
|
2014-05-21 13:09:00 +08:00
|
|
|
if (!FD) {
|
2014-05-06 18:08:46 +08:00
|
|
|
// If the CapturedDecl is nothrow, pop the terminate scope pushed for it.
|
|
|
|
if (const CapturedDecl* CD = dyn_cast_or_null<CapturedDecl>(D)) {
|
|
|
|
if (CD->isNothrow())
|
|
|
|
EHStack.popTerminate();
|
|
|
|
}
|
2009-12-08 07:38:24 +08:00
|
|
|
return;
|
2014-05-06 18:08:46 +08:00
|
|
|
}
|
2009-12-08 07:38:24 +08:00
|
|
|
const FunctionProtoType *Proto = FD->getType()->getAs<FunctionProtoType>();
|
2014-05-21 13:09:00 +08:00
|
|
|
if (!Proto)
|
2009-12-08 07:38:24 +08:00
|
|
|
return;
|
|
|
|
|
2011-03-16 02:42:48 +08:00
|
|
|
ExceptionSpecificationType EST = Proto->getExceptionSpecType();
|
|
|
|
if (isNoexceptExceptionSpec(EST)) {
|
|
|
|
if (Proto->getNoexceptSpec(getContext()) == FunctionProtoType::NR_Nothrow) {
|
|
|
|
EHStack.popTerminate();
|
|
|
|
}
|
|
|
|
} else if (EST == EST_Dynamic || EST == EST_DynamicNone) {
|
2015-04-01 12:45:52 +08:00
|
|
|
// TODO: Revisit exception specifications for the MS ABI. There is a way to
|
|
|
|
// encode these in an object file, but MSVC doesn't do anything with them.
|
|
|
|
if (getTarget().getCXXABI().isMicrosoft())
|
|
|
|
return;
|
2011-08-11 10:22:43 +08:00
|
|
|
EHFilterScope &filterScope = cast<EHFilterScope>(*EHStack.begin());
|
|
|
|
emitFilterDispatchBlock(*this, filterScope);
|
2011-03-16 02:42:48 +08:00
|
|
|
EHStack.popFilter();
|
|
|
|
}
|
2009-12-08 07:38:24 +08:00
|
|
|
}
|
|
|
|
|
2009-11-21 07:44:51 +08:00
|
|
|
void CodeGenFunction::EmitCXXTryStmt(const CXXTryStmt &S) {
|
2010-07-07 14:56:46 +08:00
|
|
|
EnterCXXTryStmt(S);
|
2010-02-19 17:25:03 +08:00
|
|
|
EmitStmt(S.getTryBlock());
|
2010-07-07 14:56:46 +08:00
|
|
|
ExitCXXTryStmt(S);
|
2010-02-19 17:25:03 +08:00
|
|
|
}
|
|
|
|
|
2010-07-07 14:56:46 +08:00
|
|
|
void CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
|
2010-07-06 09:34:17 +08:00
|
|
|
unsigned NumHandlers = S.getNumHandlers();
|
|
|
|
EHCatchScope *CatchScope = EHStack.pushCatch(NumHandlers);
|
|
|
|
|
|
|
|
for (unsigned I = 0; I != NumHandlers; ++I) {
|
|
|
|
const CXXCatchStmt *C = S.getHandler(I);
|
|
|
|
|
|
|
|
llvm::BasicBlock *Handler = createBasicBlock("catch");
|
|
|
|
if (C->getExceptionDecl()) {
|
|
|
|
// FIXME: Dropping the reference type on the type info makes it
|
|
|
|
// impossible to correctly implement catch-by-reference
|
|
|
|
// semantics for pointers. Unfortunately, this is what all
|
|
|
|
// existing compilers do, and it's not clear that the standard
|
|
|
|
// personality routine is capable of doing this right. See C++ DR 388:
|
|
|
|
// http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#388
|
2014-10-12 14:58:22 +08:00
|
|
|
Qualifiers CaughtTypeQuals;
|
|
|
|
QualType CaughtType = CGM.getContext().getUnqualifiedArrayType(
|
|
|
|
C->getCaughtType().getNonReferenceType(), CaughtTypeQuals);
|
2010-07-24 08:37:23 +08:00
|
|
|
|
2015-09-17 04:15:55 +08:00
|
|
|
CatchTypeInfo TypeInfo{nullptr, 0};
|
2010-07-24 08:37:23 +08:00
|
|
|
if (CaughtType->isObjCObjectPointerType())
|
2015-09-17 04:15:55 +08:00
|
|
|
TypeInfo.RTTI = CGM.getObjCRuntime().GetEHType(CaughtType);
|
2010-07-24 08:37:23 +08:00
|
|
|
else
|
2015-09-17 04:15:55 +08:00
|
|
|
TypeInfo = CGM.getCXXABI().getAddrOfCXXCatchHandlerType(
|
|
|
|
CaughtType, C->getCaughtType());
|
2010-07-06 09:34:17 +08:00
|
|
|
CatchScope->setHandler(I, TypeInfo, Handler);
|
|
|
|
} else {
|
|
|
|
// No exception decl indicates '...', a catch-all.
|
2015-09-17 04:15:55 +08:00
|
|
|
CatchScope->setHandler(I, CGM.getCXXABI().getCatchAllTypeInfo(), Handler);
|
2010-07-06 09:34:17 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
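Concretely (hypothetical source; 'Derived' and 'risky' are illustrative
names), the loop above records one handler per catch clause, stripping
references and top-level qualifiers from the caught type before asking
the ABI for its type info:

// C++ code:
try {
  risky();                    // hypothetical callee
} catch (const Derived &d) {  // handler 0: RTTI for 'Derived'
} catch (...) {               // handler 1: the ABI's catch-all type info
}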
|
|
|
|
|
2011-08-11 10:22:43 +08:00
|
|
|
llvm::BasicBlock *
|
|
|
|
CodeGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si) {
|
2015-10-08 09:13:52 +08:00
|
|
|
if (EHPersonality::get(*this).usesFuncletPads())
|
2015-08-01 01:58:45 +08:00
|
|
|
return getMSVCDispatchBlock(si);
|
|
|
|
|
2011-08-11 10:22:43 +08:00
|
|
|
// The dispatch block for the end of the scope chain is a block that
|
|
|
|
// just resumes unwinding.
|
|
|
|
if (si == EHStack.stable_end())
|
2012-11-08 00:50:40 +08:00
|
|
|
return getEHResumeBlock(true);
|
2011-08-11 10:22:43 +08:00
|
|
|
|
|
|
|
// Otherwise, we should look at the actual scope.
|
|
|
|
EHScope &scope = *EHStack.find(si);
|
|
|
|
|
|
|
|
llvm::BasicBlock *dispatchBlock = scope.getCachedEHDispatchBlock();
|
|
|
|
if (!dispatchBlock) {
|
|
|
|
switch (scope.getKind()) {
|
|
|
|
case EHScope::Catch: {
|
|
|
|
// Apply a special case to a single catch-all.
|
|
|
|
EHCatchScope &catchScope = cast<EHCatchScope>(scope);
|
|
|
|
if (catchScope.getNumHandlers() == 1 &&
|
|
|
|
catchScope.getHandler(0).isCatchAll()) {
|
|
|
|
dispatchBlock = catchScope.getHandler(0).Block;
|
|
|
|
|
|
|
|
// Otherwise, make a dispatch block.
|
|
|
|
} else {
|
|
|
|
dispatchBlock = createBasicBlock("catch.dispatch");
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case EHScope::Cleanup:
|
|
|
|
dispatchBlock = createBasicBlock("ehcleanup");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case EHScope::Filter:
|
|
|
|
dispatchBlock = createBasicBlock("filter.dispatch");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case EHScope::Terminate:
|
|
|
|
dispatchBlock = getTerminateHandler();
|
|
|
|
break;
|
2015-08-01 01:58:45 +08:00
|
|
|
|
2015-09-11 06:11:13 +08:00
|
|
|
case EHScope::PadEnd:
|
|
|
|
llvm_unreachable("PadEnd unnecessary for Itanium!");
|
2011-08-11 10:22:43 +08:00
|
|
|
}
|
|
|
|
scope.setCachedEHDispatchBlock(dispatchBlock);
|
|
|
|
}
|
|
|
|
return dispatchBlock;
|
|
|
|
}
|
|
|
|
|
2015-08-01 01:58:45 +08:00
|
|
|
llvm::BasicBlock *
|
|
|
|
CodeGenFunction::getMSVCDispatchBlock(EHScopeStack::stable_iterator SI) {
|
|
|
|
// Returning nullptr indicates that the previous dispatch block should unwind
|
|
|
|
// to caller.
|
|
|
|
if (SI == EHStack.stable_end())
|
|
|
|
return nullptr;
|
|
|
|
|
|
|
|
// Otherwise, we should look at the actual scope.
|
|
|
|
EHScope &EHS = *EHStack.find(SI);
|
|
|
|
|
|
|
|
llvm::BasicBlock *DispatchBlock = EHS.getCachedEHDispatchBlock();
|
|
|
|
if (DispatchBlock)
|
|
|
|
return DispatchBlock;
|
|
|
|
|
|
|
|
if (EHS.getKind() == EHScope::Terminate)
|
|
|
|
DispatchBlock = getTerminateHandler();
|
|
|
|
else
|
|
|
|
DispatchBlock = createBasicBlock();
|
|
|
|
CGBuilderTy Builder(*this, DispatchBlock);
|
2015-08-01 01:58:45 +08:00
|
|
|
|
|
|
|
switch (EHS.getKind()) {
|
|
|
|
case EHScope::Catch:
|
|
|
|
DispatchBlock->setName("catch.dispatch");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case EHScope::Cleanup:
|
|
|
|
DispatchBlock->setName("ehcleanup");
|
|
|
|
break;
|
|
|
|
|
|
|
|
case EHScope::Filter:
|
|
|
|
llvm_unreachable("exception specifications not handled yet!");
|
|
|
|
|
|
|
|
case EHScope::Terminate:
|
|
|
|
DispatchBlock->setName("terminate");
|
|
|
|
break;
|
|
|
|
|
2015-09-11 06:11:13 +08:00
|
|
|
case EHScope::PadEnd:
|
|
|
|
llvm_unreachable("PadEnd dispatch block missing!");
|
2015-08-01 01:58:45 +08:00
|
|
|
}
|
|
|
|
EHS.setCachedEHDispatchBlock(DispatchBlock);
|
|
|
|
return DispatchBlock;
|
|
|
|
}
|
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
/// Check whether this is a non-EH scope, i.e. a scope which doesn't
|
|
|
|
/// affect exception handling. Currently, the only non-EH scopes are
|
|
|
|
/// normal-only cleanup scopes.
|
|
|
|
static bool isNonEHScope(const EHScope &S) {
|
2010-07-14 04:32:21 +08:00
|
|
|
switch (S.getKind()) {
|
2010-07-21 15:22:38 +08:00
|
|
|
case EHScope::Cleanup:
|
|
|
|
return !cast<EHCleanupScope>(S).isEHCleanup();
|
2010-07-14 04:32:21 +08:00
|
|
|
case EHScope::Filter:
|
|
|
|
case EHScope::Catch:
|
|
|
|
case EHScope::Terminate:
|
2015-09-11 06:11:13 +08:00
|
|
|
case EHScope::PadEnd:
|
2010-07-14 04:32:21 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2012-01-21 05:50:17 +08:00
|
|
|
llvm_unreachable("Invalid EHScope Kind!");
|
2010-07-06 09:34:17 +08:00
|
|
|
}
|
2010-02-19 17:25:03 +08:00
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
|
|
|
|
assert(EHStack.requiresLandingPad());
|
|
|
|
assert(!EHStack.empty());
|
2010-02-19 17:25:03 +08:00
|
|
|
|
2016-03-02 03:51:48 +08:00
|
|
|
// If exceptions are disabled and SEH is not in use, then there is no invoke
|
|
|
|
// destination. SEH "works" even if exceptions are off. In practice, this
|
|
|
|
// means that C++ destructors and other EH cleanups don't run, which is
|
|
|
|
// consistent with MSVC's behavior.
|
2015-02-06 02:56:03 +08:00
|
|
|
const LangOptions &LO = CGM.getLangOpts();
|
|
|
|
if (!LO.Exceptions) {
|
|
|
|
if (!LO.Borland && !LO.MicrosoftExt)
|
|
|
|
return nullptr;
|
2015-02-11 08:00:21 +08:00
|
|
|
if (!currentFunctionUsesSEHTry())
|
2015-02-06 02:56:03 +08:00
|
|
|
return nullptr;
|
|
|
|
}
|
2010-07-14 04:32:21 +08:00
|
|
|
|
2016-10-05 07:41:49 +08:00
|
|
|
// CUDA device code doesn't have exceptions.
|
|
|
|
if (LO.CUDA && LO.CUDAIsDevice)
|
|
|
|
return nullptr;
|
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
// Check the innermost scope for a cached landing pad. If this is
|
|
|
|
// a non-EH cleanup, we'll check enclosing scopes in EmitLandingPad.
|
|
|
|
llvm::BasicBlock *LP = EHStack.begin()->getCachedLandingPad();
|
|
|
|
if (LP) return LP;
|
|
|
|
|
2015-08-01 01:58:45 +08:00
|
|
|
const EHPersonality &Personality = EHPersonality::get(*this);
|
|
|
|
|
|
|
|
if (!CurFn->hasPersonalityFn())
|
|
|
|
CurFn->setPersonalityFn(getOpaquePersonalityFn(CGM, Personality));
|
|
|
|
|
2015-10-08 09:13:52 +08:00
|
|
|
if (Personality.usesFuncletPads()) {
|
|
|
|
// We don't need separate landing pads in the funclet model.
|
2015-08-01 01:58:45 +08:00
|
|
|
LP = getEHDispatchBlock(EHStack.getInnermostEHScope());
|
|
|
|
} else {
|
|
|
|
// Build the landing pad for this scope.
|
|
|
|
LP = EmitLandingPad();
|
|
|
|
}
|
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
assert(LP);
|
|
|
|
|
|
|
|
// Cache the landing pad on the innermost scope. If this is a
|
|
|
|
// non-EH scope, cache the landing pad on the enclosing scope, too.
|
|
|
|
for (EHScopeStack::iterator ir = EHStack.begin(); true; ++ir) {
|
|
|
|
ir->setCachedLandingPad(LP);
|
|
|
|
if (!isNonEHScope(*ir)) break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return LP;
|
2010-02-19 17:25:03 +08:00
|
|
|
}
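A sketch of the consuming pattern (the real call-site logic lives in call
emission, not in this file; Callee, Args, and the "invoke.cont" name are
assumptions for illustration): a null result means calls can be emitted
as plain calls rather than invokes.

// C++ code (illustrative, inside a CodeGenFunction member):
if (llvm::BasicBlock *InvokeDest = getInvokeDest()) {
  llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
  Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
  EmitBlock(Cont);
} else {
  Builder.CreateCall(Callee, Args);  // nothing on the EH stack cares
}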
|
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
|
|
|
|
assert(EHStack.requiresLandingPad());
|
|
|
|
|
2011-08-11 10:22:43 +08:00
|
|
|
EHScope &innermostEHScope = *EHStack.find(EHStack.getInnermostEHScope());
|
|
|
|
switch (innermostEHScope.getKind()) {
|
|
|
|
case EHScope::Terminate:
|
|
|
|
return getTerminateLandingPad();
|
2010-07-06 09:34:17 +08:00
|
|
|
|
2015-09-11 06:11:13 +08:00
|
|
|
case EHScope::PadEnd:
|
|
|
|
llvm_unreachable("PadEnd unnecessary for Itanium!");
|
2015-08-01 01:58:45 +08:00
|
|
|
|
2011-08-11 10:22:43 +08:00
|
|
|
case EHScope::Catch:
|
|
|
|
case EHScope::Cleanup:
|
|
|
|
case EHScope::Filter:
|
|
|
|
if (llvm::BasicBlock *lpad = innermostEHScope.getCachedLandingPad())
|
|
|
|
return lpad;
|
2010-07-06 09:34:17 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Save the current IR generation state.
|
2011-08-11 10:22:43 +08:00
|
|
|
CGBuilderTy::InsertPoint savedIP = Builder.saveAndClearIP();
|
2015-02-04 04:00:54 +08:00
|
|
|
auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, CurEHLocation);
|
2010-07-06 09:34:17 +08:00
|
|
|
|
|
|
|
// Create and configure the landing pad.
|
2011-08-11 10:22:43 +08:00
|
|
|
llvm::BasicBlock *lpad = createBasicBlock("lpad");
|
|
|
|
EmitBlock(lpad);
|
2010-07-06 09:34:17 +08:00
|
|
|
|
2017-05-10 03:31:30 +08:00
|
|
|
llvm::LandingPadInst *LPadInst =
|
|
|
|
Builder.CreateLandingPad(llvm::StructType::get(Int8PtrTy, Int32Ty), 0);
|
2011-09-20 04:31:14 +08:00
|
|
|
|
|
|
|
llvm::Value *LPadExn = Builder.CreateExtractValue(LPadInst, 0);
|
|
|
|
Builder.CreateStore(LPadExn, getExceptionSlot());
|
|
|
|
llvm::Value *LPadSel = Builder.CreateExtractValue(LPadInst, 1);
|
|
|
|
Builder.CreateStore(LPadSel, getEHSelectorSlot());
|
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
// The stores above save the exception pointer.  It's safe to use a single
|
|
|
|
// exception pointer per function because EH cleanups can never have nested
|
|
|
|
// try/catches.
|
2011-09-20 04:31:14 +08:00
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
|
|
|
|
// Accumulate all the handlers in scope.
|
2011-08-11 10:22:43 +08:00
|
|
|
bool hasCatchAll = false;
|
|
|
|
bool hasCleanup = false;
|
|
|
|
bool hasFilter = false;
|
|
|
|
SmallVector<llvm::Value*, 4> filterTypes;
|
|
|
|
llvm::SmallPtrSet<llvm::Value*, 4> catchTypes;
|
Reland r230460 with a test fix for -Asserts builds.
Original CL description:
Produce less broken basic block sequences for __finally blocks.
The way cleanups (such as PerformSEHFinally) get emitted is that codegen
generates some initialization code, then calls the cleanup's Emit() with the
insertion point set to a good place, then the cleanup is supposed to emit its
stuff, and then codegen might tack in a jump or similar to where the insertion
point is after the cleanup.
The PerformSEHFinally cleanup tries to just stash away the block it's supposed
to codegen into, and then does codegen later, into that stashed block. However,
after codegen'ing the __finally block, it used to set the insertion point to
the finally's continuation block (where the __finally cleanup goes when its body
is completed after regular, non-exceptional control flow). That's not correct,
as that block can (and generally does) already ends in a jump. Instead,
remember the insertion point that was current before the __finally got emitted,
and restore that.
Fixes two of the crashes in PR22553.
llvm-svn: 230503
2015-02-26 00:25:00 +08:00
|
|
|
for (EHScopeStack::iterator I = EHStack.begin(), E = EHStack.end(); I != E;
|
|
|
|
++I) {
|
2010-07-06 09:34:17 +08:00
|
|
|
|
|
|
|
switch (I->getKind()) {
|
2010-07-21 15:22:38 +08:00
|
|
|
case EHScope::Cleanup:
|
2011-08-11 10:22:43 +08:00
|
|
|
// If we have a cleanup, remember that.
|
|
|
|
hasCleanup = (hasCleanup || cast<EHCleanupScope>(*I).isEHCleanup());
|
2010-07-14 04:32:21 +08:00
|
|
|
continue;
|
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
case EHScope::Filter: {
|
|
|
|
assert(I.next() == EHStack.end() && "EH filter is not end of EH stack");
|
2011-08-11 10:22:43 +08:00
|
|
|
assert(!hasCatchAll && "EH filter reached after catch-all");
|
2010-07-06 09:34:17 +08:00
|
|
|
|
2011-09-20 04:31:14 +08:00
|
|
|
// Filter scopes get added to the landingpad in weird ways.
|
2011-08-11 10:22:43 +08:00
|
|
|
EHFilterScope &filter = cast<EHFilterScope>(*I);
|
|
|
|
hasFilter = true;
|
2010-07-06 09:34:17 +08:00
|
|
|
|
2011-09-23 04:32:54 +08:00
|
|
|
// Add all the filter values.
|
|
|
|
for (unsigned i = 0, e = filter.getNumFilters(); i != e; ++i)
|
|
|
|
filterTypes.push_back(filter.getFilter(i));
|
2010-07-06 09:34:17 +08:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
|
|
|
case EHScope::Terminate:
|
|
|
|
// Terminate scopes are basically catch-alls.
|
2011-08-11 10:22:43 +08:00
|
|
|
assert(!hasCatchAll);
|
|
|
|
hasCatchAll = true;
|
2010-07-06 09:34:17 +08:00
|
|
|
goto done;
|
|
|
|
|
|
|
|
case EHScope::Catch:
|
|
|
|
break;
|
2015-08-01 01:58:45 +08:00
|
|
|
|
2015-09-11 06:11:13 +08:00
|
|
|
case EHScope::PadEnd:
|
|
|
|
llvm_unreachable("PadEnd unnecessary for Itanium!");
|
2010-07-06 09:34:17 +08:00
|
|
|
}
|
|
|
|
|
2011-08-11 10:22:43 +08:00
|
|
|
EHCatchScope &catchScope = cast<EHCatchScope>(*I);
|
|
|
|
for (unsigned hi = 0, he = catchScope.getNumHandlers(); hi != he; ++hi) {
|
|
|
|
EHCatchScope::Handler handler = catchScope.getHandler(hi);
|
2015-09-17 04:15:55 +08:00
|
|
|
assert(handler.Type.Flags == 0 &&
|
|
|
|
"landingpads do not support catch handler flags");
|
2011-08-11 10:22:43 +08:00
|
|
|
|
|
|
|
// If this is a catch-all, register that and abort.
|
2015-09-17 04:15:55 +08:00
|
|
|
if (!handler.Type.RTTI) {
|
2011-08-11 10:22:43 +08:00
|
|
|
assert(!hasCatchAll);
|
|
|
|
hasCatchAll = true;
|
|
|
|
goto done;
|
2010-07-06 09:34:17 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Check whether we already have a handler for this type.
|
2015-09-17 04:15:55 +08:00
|
|
|
if (catchTypes.insert(handler.Type.RTTI).second)
|
2011-09-20 04:31:14 +08:00
|
|
|
// If not, add it directly to the landingpad.
|
2015-09-17 04:15:55 +08:00
|
|
|
LPadInst->addClause(handler.Type.RTTI);
|
2009-11-21 07:44:51 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
done:
|
2011-09-20 04:31:14 +08:00
|
|
|
// If we have a catch-all, add null to the landingpad.
|
2011-08-11 10:22:43 +08:00
|
|
|
assert(!(hasCatchAll && hasFilter));
|
|
|
|
if (hasCatchAll) {
|
2011-09-20 04:31:14 +08:00
|
|
|
LPadInst->addClause(getCatchAllValue(*this));
|
2010-07-06 09:34:17 +08:00
|
|
|
|
|
|
|
// If we have an EH filter, we need to add those handlers in the
|
2011-09-20 04:31:14 +08:00
|
|
|
// right place in the landingpad, which is to say, at the end.
|
2011-08-11 10:22:43 +08:00
|
|
|
} else if (hasFilter) {
|
2011-09-20 06:08:36 +08:00
|
|
|
// Create a filter expression: a constant array indicating which filter
|
|
|
|
// types there are. The personality routine only lands here if the filter
|
|
|
|
// doesn't match.
|
2013-01-13 03:30:44 +08:00
|
|
|
SmallVector<llvm::Constant*, 8> Filters;
|
2011-09-20 04:31:14 +08:00
|
|
|
llvm::ArrayType *AType =
|
|
|
|
llvm::ArrayType::get(!filterTypes.empty() ?
|
|
|
|
filterTypes[0]->getType() : Int8PtrTy,
|
|
|
|
filterTypes.size());
|
|
|
|
|
|
|
|
for (unsigned i = 0, e = filterTypes.size(); i != e; ++i)
|
|
|
|
Filters.push_back(cast<llvm::Constant>(filterTypes[i]));
|
|
|
|
llvm::Constant *FilterArray = llvm::ConstantArray::get(AType, Filters);
|
|
|
|
LPadInst->addClause(FilterArray);
|
2010-07-06 09:34:17 +08:00
|
|
|
|
|
|
|
// Also check whether we need a cleanup.
|
2011-09-20 04:31:14 +08:00
|
|
|
if (hasCleanup)
|
|
|
|
LPadInst->setCleanup(true);
|
2010-07-06 09:34:17 +08:00
|
|
|
|
|
|
|
// Otherwise, signal that we at least have cleanups.
|
2014-07-01 19:47:10 +08:00
|
|
|
} else if (hasCleanup) {
|
|
|
|
LPadInst->setCleanup(true);
|
2009-12-01 11:41:18 +08:00
|
|
|
}
|
2009-11-21 07:44:51 +08:00
|
|
|
|
2011-09-20 04:31:14 +08:00
|
|
|
assert((LPadInst->getNumClauses() > 0 || LPadInst->isCleanup()) &&
|
|
|
|
"landingpad instruction has no clauses!");
|
2010-07-06 09:34:17 +08:00
|
|
|
|
|
|
|
// Tell the backend how to generate the landing pad.
|
2011-08-11 10:22:43 +08:00
|
|
|
Builder.CreateBr(getEHDispatchBlock(EHStack.getInnermostEHScope()));
|
2010-07-06 09:34:17 +08:00
|
|
|
|
|
|
|
// Restore the old IR generation state.
|
2011-08-11 10:22:43 +08:00
|
|
|
Builder.restoreIP(savedIP);
|
2010-07-06 09:34:17 +08:00
|
|
|
|
2011-08-11 10:22:43 +08:00
|
|
|
return lpad;
|
2010-07-06 09:34:17 +08:00
|
|
|
}
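Putting the pieces together: for a try with one int handler nested in a
cleanup, the code above produces a landing pad along these lines
(illustrative; @_ZTIi is the Itanium type info for 'int'):

; LLVM IR:
lpad:
  %lp = landingpad { i8*, i32 }
          catch i8* bitcast (i8** @_ZTIi to i8*)
          cleanup
  %exn = extractvalue { i8*, i32 } %lp, 0
  store i8* %exn, i8** %exn.slot
  %sel = extractvalue { i8*, i32 } %lp, 1
  store i32 %sel, i32* %ehselector.slot
  br label %catch.dispatch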
|
2009-12-01 11:41:18 +08:00
|
|
|
|
2015-12-12 13:39:21 +08:00
|
|
|
static void emitCatchPadBlock(CodeGenFunction &CGF, EHCatchScope &CatchScope) {
|
2015-08-01 01:58:45 +08:00
|
|
|
llvm::BasicBlock *DispatchBlock = CatchScope.getCachedEHDispatchBlock();
|
|
|
|
assert(DispatchBlock);
|
|
|
|
|
|
|
|
CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveIP();
|
|
|
|
CGF.EmitBlockAfterUses(DispatchBlock);
|
|
|
|
|
2015-12-12 13:39:21 +08:00
|
|
|
llvm::Value *ParentPad = CGF.CurrentFuncletPad;
|
|
|
|
if (!ParentPad)
|
|
|
|
ParentPad = llvm::ConstantTokenNone::get(CGF.getLLVMContext());
|
|
|
|
llvm::BasicBlock *UnwindBB =
|
|
|
|
CGF.getEHDispatchBlock(CatchScope.getEnclosingEHScope());
|
|
|
|
|
|
|
|
unsigned NumHandlers = CatchScope.getNumHandlers();
|
|
|
|
llvm::CatchSwitchInst *CatchSwitch =
|
|
|
|
CGF.Builder.CreateCatchSwitch(ParentPad, UnwindBB, NumHandlers);
|
2015-08-01 01:58:45 +08:00
|
|
|
|
|
|
|
// Test against each of the exception types we claim to catch.
|
2015-12-12 13:39:21 +08:00
|
|
|
for (unsigned I = 0; I < NumHandlers; ++I) {
|
2015-08-01 01:58:45 +08:00
|
|
|
const EHCatchScope::Handler &Handler = CatchScope.getHandler(I);
|
|
|
|
|
2015-09-17 04:15:55 +08:00
|
|
|
CatchTypeInfo TypeInfo = Handler.Type;
|
|
|
|
if (!TypeInfo.RTTI)
|
|
|
|
TypeInfo.RTTI = llvm::Constant::getNullValue(CGF.VoidPtrTy);
|
2015-08-01 01:58:45 +08:00
|
|
|
|
2015-12-12 13:39:21 +08:00
|
|
|
CGF.Builder.SetInsertPoint(Handler.Block);
|
2015-08-01 01:58:45 +08:00
|
|
|
|
|
|
|
if (EHPersonality::get(CGF).isMSVCXXPersonality()) {
|
2015-12-12 13:39:21 +08:00
|
|
|
CGF.Builder.CreateCatchPad(
|
|
|
|
CatchSwitch, {TypeInfo.RTTI, CGF.Builder.getInt32(TypeInfo.Flags),
|
|
|
|
llvm::Constant::getNullValue(CGF.VoidPtrTy)});
|
2015-08-01 01:58:45 +08:00
|
|
|
} else {
|
2015-12-12 13:39:21 +08:00
|
|
|
CGF.Builder.CreateCatchPad(CatchSwitch, {TypeInfo.RTTI});
|
2015-08-01 01:58:45 +08:00
|
|
|
}
|
|
|
|
|
2015-12-12 13:39:21 +08:00
|
|
|
CatchSwitch->addHandler(Handler.Block);
|
2015-08-01 01:58:45 +08:00
|
|
|
}
|
|
|
|
CGF.Builder.restoreIP(SavedIP);
|
|
|
|
}
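Under a funclet personality this takes the catchswitch/catchpad form
instead of a landing pad; roughly, for a single catch of 'int' under the
MSVC C++ personality (illustrative; ??_R0H@8 is MSVC's type descriptor
for 'int', and "unwind to caller" is what a null unwind block turns into):

; LLVM IR:
catch.dispatch:
  %cs = catchswitch within none [label %catch] unwind to caller
catch:
  %cp = catchpad within %cs [%rtti.TypeDescriptor2* @"??_R0H@8",
                             i32 0, i8* null]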
|
|
|
|
|
2011-08-11 10:22:43 +08:00
|
|
|
/// Emit the structure of the dispatch block for the given catch scope.
|
|
|
|
/// It is an invariant that the dispatch block already exists.
|
2015-12-12 13:39:21 +08:00
|
|
|
static void emitCatchDispatchBlock(CodeGenFunction &CGF,
|
|
|
|
EHCatchScope &catchScope) {
|
2015-10-08 09:13:52 +08:00
|
|
|
if (EHPersonality::get(CGF).usesFuncletPads())
|
|
|
|
return emitCatchPadBlock(CGF, catchScope);
|
2015-08-01 01:58:45 +08:00
|
|
|
|
2011-08-11 10:22:43 +08:00
|
|
|
llvm::BasicBlock *dispatchBlock = catchScope.getCachedEHDispatchBlock();
|
|
|
|
assert(dispatchBlock);
|
|
|
|
|
|
|
|
// If there's only a single catch-all, getEHDispatchBlock returned
|
|
|
|
// that catch-all as the dispatch block.
|
|
|
|
if (catchScope.getNumHandlers() == 1 &&
|
|
|
|
catchScope.getHandler(0).isCatchAll()) {
|
|
|
|
assert(dispatchBlock == catchScope.getHandler(0).Block);
|
2015-12-12 13:39:21 +08:00
|
|
|
return;
|
2011-08-11 10:22:43 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
CGBuilderTy::InsertPoint savedIP = CGF.Builder.saveIP();
|
|
|
|
CGF.EmitBlockAfterUses(dispatchBlock);
|
|
|
|
|
|
|
|
// Select the right handler.
|
|
|
|
llvm::Value *llvm_eh_typeid_for =
|
|
|
|
CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
|
|
|
|
|
|
|
|
// Load the selector value.
|
2011-09-16 02:57:19 +08:00
|
|
|
llvm::Value *selector = CGF.getSelectorFromSlot();
|
2011-08-11 10:22:43 +08:00
|
|
|
|
|
|
|
// Test against each of the exception types we claim to catch.
|
|
|
|
for (unsigned i = 0, e = catchScope.getNumHandlers(); ; ++i) {
|
|
|
|
assert(i < e && "ran off end of handlers!");
|
|
|
|
const EHCatchScope::Handler &handler = catchScope.getHandler(i);
|
|
|
|
|
2015-09-17 04:15:55 +08:00
|
|
|
llvm::Value *typeValue = handler.Type.RTTI;
|
|
|
|
assert(handler.Type.Flags == 0 &&
|
|
|
|
"landingpads do not support catch handler flags");
|
2011-08-11 10:22:43 +08:00
|
|
|
assert(typeValue && "fell into catch-all case!");
|
|
|
|
typeValue = CGF.Builder.CreateBitCast(typeValue, CGF.Int8PtrTy);
|
|
|
|
|
|
|
|
// Figure out the next block.
|
|
|
|
bool nextIsEnd;
|
|
|
|
llvm::BasicBlock *nextBlock;
|
|
|
|
|
|
|
|
// If this is the last handler, we're at the end, and the next
|
|
|
|
// block is the block for the enclosing EH scope.
|
|
|
|
if (i + 1 == e) {
|
|
|
|
nextBlock = CGF.getEHDispatchBlock(catchScope.getEnclosingEHScope());
|
|
|
|
nextIsEnd = true;
|
|
|
|
|
|
|
|
// If the next handler is a catch-all, we're at the end, and the
|
|
|
|
// next block is that handler.
|
|
|
|
} else if (catchScope.getHandler(i+1).isCatchAll()) {
|
|
|
|
nextBlock = catchScope.getHandler(i+1).Block;
|
|
|
|
nextIsEnd = true;
|
|
|
|
|
|
|
|
// Otherwise, we're not at the end and we need a new block.
|
|
|
|
} else {
|
|
|
|
nextBlock = CGF.createBasicBlock("catch.fallthrough");
|
|
|
|
nextIsEnd = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Figure out the catch type's index in the LSDA's type table.
|
|
|
|
llvm::CallInst *typeIndex =
|
|
|
|
CGF.Builder.CreateCall(llvm_eh_typeid_for, typeValue);
|
|
|
|
typeIndex->setDoesNotThrow();
|
|
|
|
|
|
|
|
llvm::Value *matchesTypeIndex =
|
|
|
|
CGF.Builder.CreateICmpEQ(selector, typeIndex, "matches");
|
|
|
|
CGF.Builder.CreateCondBr(matchesTypeIndex, handler.Block, nextBlock);
|
|
|
|
|
|
|
|
// If that was the final alternative, we're completely done.
|
|
|
|
if (nextIsEnd) {
|
|
|
|
CGF.Builder.restoreIP(savedIP);
|
2015-12-12 13:39:21 +08:00
|
|
|
return;
|
2011-08-11 10:22:43 +08:00
|
|
|
}
|
2012-02-19 19:57:29 +08:00
|
|
|
// Otherwise we need to emit and continue at that block.
|
|
|
|
CGF.EmitBlock(nextBlock);
|
2011-08-11 10:22:43 +08:00
|
|
|
}
|
|
|
|
}
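On Itanium the loop above unrolls into a compare-and-branch chain; for
two typed handlers (int, then float) the shape is roughly as follows
(illustrative; the handler block names are hypothetical, and the final
false edge goes to the enclosing scope's dispatch block):

; LLVM IR:
catch.dispatch:
  %sel = load i32, i32* %ehselector.slot
  %tid.int = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*))
  %matches = icmp eq i32 %sel, %tid.int
  br i1 %matches, label %catch.int, label %catch.fallthrough
catch.fallthrough:
  %tid.float = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIf to i8*))
  %matches1 = icmp eq i32 %sel, %tid.float
  br i1 %matches1, label %catch.float, label %eh.resume  ; enclosing dispatch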
|
|
|
|
|
|
|
|
void CodeGenFunction::popCatchScope() {
|
|
|
|
EHCatchScope &catchScope = cast<EHCatchScope>(*EHStack.begin());
|
|
|
|
if (catchScope.hasEHBranches())
|
|
|
|
emitCatchDispatchBlock(*this, catchScope);
|
|
|
|
EHStack.popCatch();
|
|
|
|
}
|
|
|
|
|
2010-07-07 14:56:46 +08:00
|
|
|
void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
|
2010-07-06 09:34:17 +08:00
|
|
|
unsigned NumHandlers = S.getNumHandlers();
|
|
|
|
EHCatchScope &CatchScope = cast<EHCatchScope>(*EHStack.begin());
|
|
|
|
assert(CatchScope.getNumHandlers() == NumHandlers);
|
|
|
|
|
2011-08-11 10:22:43 +08:00
|
|
|
// If the catch was not required, bail out now.
|
|
|
|
if (!CatchScope.hasEHBranches()) {
|
2014-01-09 17:22:32 +08:00
|
|
|
CatchScope.clearHandlerBlocks();
|
2011-08-11 10:22:43 +08:00
|
|
|
EHStack.popCatch();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Emit the structure of the EH dispatch for this catch.
|
2015-12-12 13:39:21 +08:00
|
|
|
emitCatchDispatchBlock(*this, CatchScope);
|
2011-08-11 10:22:43 +08:00
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
// Copy the handler blocks off before we pop the EH stack. Emitting
|
|
|
|
// the handlers might scribble on this memory.
|
2015-08-04 23:38:49 +08:00
|
|
|
SmallVector<EHCatchScope::Handler, 8> Handlers(
|
|
|
|
CatchScope.begin(), CatchScope.begin() + NumHandlers);
|
2011-08-11 10:22:43 +08:00
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
EHStack.popCatch();
|
|
|
|
|
|
|
|
// The fall-through block.
|
|
|
|
llvm::BasicBlock *ContBB = createBasicBlock("try.cont");
|
|
|
|
|
|
|
|
// We just emitted the body of the try; jump to the continue block.
|
|
|
|
if (HaveInsertPoint())
|
|
|
|
Builder.CreateBr(ContBB);
|
|
|
|
|
2012-06-15 13:27:05 +08:00
|
|
|
// Determine if we need an implicit rethrow for all these catch handlers;
|
|
|
|
// see the comment below.
|
|
|
|
bool doImplicitRethrow = false;
|
2010-07-07 14:56:46 +08:00
|
|
|
if (IsFnTryBlock)
|
2012-06-15 13:27:05 +08:00
|
|
|
doImplicitRethrow = isa<CXXDestructorDecl>(CurCodeDecl) ||
|
|
|
|
isa<CXXConstructorDecl>(CurCodeDecl);
|
2010-07-07 14:56:46 +08:00
|
|
|
|
2011-08-11 10:22:43 +08:00
|
|
|
// Perversely, we emit the handlers backwards precisely because we
|
|
|
|
// want them to appear in source order. In all of these cases, the
|
|
|
|
// catch block will have exactly one predecessor, which will be a
|
|
|
|
// particular block in the catch dispatch. However, in the case of
|
|
|
|
// a catch-all, one of the dispatch blocks will branch to two
|
|
|
|
// different handlers, and EmitBlockAfterUses will cause the second
|
|
|
|
// handler to be moved before the first.
|
|
|
|
for (unsigned I = NumHandlers; I != 0; --I) {
|
|
|
|
llvm::BasicBlock *CatchBlock = Handlers[I-1].Block;
|
|
|
|
EmitBlockAfterUses(CatchBlock);
|
2010-07-06 09:34:17 +08:00
|
|
|
|
|
|
|
// Catch the exception if this isn't a catch-all.
|
2011-08-11 10:22:43 +08:00
|
|
|
const CXXCatchStmt *C = S.getHandler(I-1);
|
2010-07-06 09:34:17 +08:00
|
|
|
|
|
|
|
// Enter a cleanup scope, including the catch variable and the
|
|
|
|
// end-catch.
|
|
|
|
RunCleanupsScope CatchScope(*this);
|
|
|
|
|
|
|
|
// Initialize the catch variable and set up the cleanups.
|
2015-12-12 13:39:21 +08:00
|
|
|
SaveAndRestore<llvm::Instruction *> RestoreCurrentFuncletPad(
|
|
|
|
CurrentFuncletPad);
|
2015-03-04 03:21:04 +08:00
|
|
|
CGM.getCXXABI().emitBeginCatch(*this, C);
|
2010-07-06 09:34:17 +08:00
|
|
|
|
2014-01-07 08:20:28 +08:00
|
|
|
// Emit the PGO counter increment.
|
2015-04-24 07:06:47 +08:00
|
|
|
incrementProfileCounter(C);
|
2014-01-07 06:27:43 +08:00
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
// Perform the body of the catch.
|
|
|
|
EmitStmt(C->getHandlerBlock());
|
|
|
|
|
2012-06-15 13:27:05 +08:00
|
|
|
// [except.handle]p11:
|
|
|
|
// The currently handled exception is rethrown if control
|
|
|
|
// reaches the end of a handler of the function-try-block of a
|
|
|
|
// constructor or destructor.
|
|
|
|
|
|
|
|
// It is important that we only do this on fallthrough and not on
|
|
|
|
// return. Note that it's illegal to put a return in a
|
|
|
|
// constructor function-try-block's catch handler (p14), so this
|
|
|
|
// really only applies to destructors.
|
|
|
|
if (doImplicitRethrow && HaveInsertPoint()) {
|
2014-11-25 15:20:20 +08:00
|
|
|
CGM.getCXXABI().emitRethrow(*this, /*isNoReturn=*/false);
|
2012-06-15 13:27:05 +08:00
|
|
|
Builder.CreateUnreachable();
|
|
|
|
Builder.ClearInsertionPoint();
|
|
|
|
}
|
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
// Fall out through the catch cleanups.
|
|
|
|
CatchScope.ForceCleanup();
|
|
|
|
|
|
|
|
// Branch out of the try.
|
|
|
|
if (HaveInsertPoint())
|
|
|
|
Builder.CreateBr(ContBB);
|
|
|
|
}
|
2009-11-21 07:44:51 +08:00
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
EmitBlock(ContBB);
|
2015-04-24 07:06:47 +08:00
|
|
|
incrementProfileCounter(&S);
|
2010-07-06 09:34:17 +08:00
|
|
|
}
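The implicit rethrow above only fires for function-try-blocks on
constructors and destructors; for example (hypothetical source):

// C++ code:
struct S {
  ~S() try {
    release();  // hypothetical callee
  } catch (...) {
    // Falling off the end of this handler rethrows per [except.handle]p11,
    // so the code above plants emitRethrow + unreachable on this path.
  }
};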
|
|
|
|
|
2010-07-21 08:52:03 +08:00
|
|
|
namespace {
|
2015-08-19 06:40:54 +08:00
|
|
|
struct CallEndCatchForFinally final : EHScopeStack::Cleanup {
|
2010-07-21 08:52:03 +08:00
|
|
|
llvm::Value *ForEHVar;
|
|
|
|
llvm::Value *EndCatchFn;
|
|
|
|
CallEndCatchForFinally(llvm::Value *ForEHVar, llvm::Value *EndCatchFn)
|
|
|
|
: ForEHVar(ForEHVar), EndCatchFn(EndCatchFn) {}
|
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) override {
|
2010-07-21 08:52:03 +08:00
|
|
|
llvm::BasicBlock *EndCatchBB = CGF.createBasicBlock("finally.endcatch");
|
|
|
|
llvm::BasicBlock *CleanupContBB =
|
|
|
|
CGF.createBasicBlock("finally.cleanup.cont");
|
|
|
|
|
|
|
|
llvm::Value *ShouldEndCatch =
|
|
|
|
CGF.Builder.CreateFlagLoad(ForEHVar, "finally.endcatch");
|
2010-07-21 08:52:03 +08:00
|
|
|
CGF.Builder.CreateCondBr(ShouldEndCatch, EndCatchBB, CleanupContBB);
|
|
|
|
CGF.EmitBlock(EndCatchBB);
|
2013-03-01 03:01:20 +08:00
|
|
|
CGF.EmitRuntimeCallOrInvoke(EndCatchFn); // catch-all, so might throw
|
2010-07-21 08:52:03 +08:00
|
|
|
CGF.EmitBlock(CleanupContBB);
|
|
|
|
}
|
|
|
|
};
|
2010-07-21 13:47:49 +08:00
|
|
|
|
2015-08-19 06:40:54 +08:00
|
|
|
struct PerformFinally final : EHScopeStack::Cleanup {
|
2010-07-21 13:47:49 +08:00
|
|
|
const Stmt *Body;
|
|
|
|
llvm::Value *ForEHVar;
|
|
|
|
llvm::Value *EndCatchFn;
|
|
|
|
llvm::Value *RethrowFn;
|
|
|
|
llvm::Value *SavedExnVar;
|
|
|
|
|
|
|
|
PerformFinally(const Stmt *Body, llvm::Value *ForEHVar,
|
|
|
|
llvm::Value *EndCatchFn,
|
|
|
|
llvm::Value *RethrowFn, llvm::Value *SavedExnVar)
|
|
|
|
: Body(Body), ForEHVar(ForEHVar), EndCatchFn(EndCatchFn),
|
|
|
|
RethrowFn(RethrowFn), SavedExnVar(SavedExnVar) {}
|
|
|
|
|
2014-03-12 14:41:41 +08:00
|
|
|
void Emit(CodeGenFunction &CGF, Flags flags) override {
|
2010-07-21 13:47:49 +08:00
|
|
|
// Enter a cleanup to call the end-catch function if one was provided.
|
|
|
|
if (EndCatchFn)
|
2010-07-21 15:22:38 +08:00
|
|
|
CGF.EHStack.pushCleanup<CallEndCatchForFinally>(NormalAndEHCleanup,
|
|
|
|
ForEHVar, EndCatchFn);
|
2010-07-21 13:47:49 +08:00
|
|
|
|
2010-08-11 08:16:14 +08:00
|
|
|
// Save the current cleanup destination in case there are
|
|
|
|
// cleanups in the finally block.
|
|
|
|
llvm::Value *SavedCleanupDest =
|
|
|
|
CGF.Builder.CreateLoad(CGF.getNormalCleanupDestSlot(),
|
|
|
|
"cleanup.dest.saved");
|
|
|
|
|
2010-07-21 13:47:49 +08:00
|
|
|
// Emit the finally block.
|
|
|
|
CGF.EmitStmt(Body);
|
|
|
|
|
|
|
|
// If the end of the finally is reachable, check whether this was
|
|
|
|
// for EH. If so, rethrow.
|
|
|
|
if (CGF.HaveInsertPoint()) {
|
|
|
|
llvm::BasicBlock *RethrowBB = CGF.createBasicBlock("finally.rethrow");
|
|
|
|
llvm::BasicBlock *ContBB = CGF.createBasicBlock("finally.cont");
|
|
|
|
|
|
|
|
llvm::Value *ShouldRethrow =
|
|
|
|
CGF.Builder.CreateFlagLoad(ForEHVar, "finally.shouldthrow");
|
2010-07-21 13:47:49 +08:00
|
|
|
CGF.Builder.CreateCondBr(ShouldRethrow, RethrowBB, ContBB);
|
|
|
|
|
|
|
|
CGF.EmitBlock(RethrowBB);
|
|
|
|
if (SavedExnVar) {
|
2013-03-01 03:01:20 +08:00
|
|
|
CGF.EmitRuntimeCallOrInvoke(RethrowFn,
|
|
|
|
CGF.Builder.CreateAlignedLoad(SavedExnVar, CGF.getPointerAlign()));
|
2010-07-21 13:47:49 +08:00
|
|
|
} else {
|
2013-03-01 03:01:20 +08:00
|
|
|
CGF.EmitRuntimeCallOrInvoke(RethrowFn);
|
2010-07-21 13:47:49 +08:00
|
|
|
}
|
|
|
|
CGF.Builder.CreateUnreachable();
|
|
|
|
|
|
|
|
CGF.EmitBlock(ContBB);
|
2010-08-11 08:16:14 +08:00
|
|
|
|
|
|
|
// Restore the cleanup destination.
|
|
|
|
CGF.Builder.CreateStore(SavedCleanupDest,
|
|
|
|
CGF.getNormalCleanupDestSlot());
|
2010-07-21 13:47:49 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Leave the end-catch cleanup. As an optimization, pretend that
|
|
|
|
// the fallthrough path was inaccessible; we've dynamically proven
|
|
|
|
// that we're not in the EH case along that path.
|
|
|
|
if (EndCatchFn) {
|
|
|
|
CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
|
|
|
|
CGF.PopCleanupBlock();
|
|
|
|
CGF.Builder.restoreIP(SavedIP);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now make sure we actually have an insertion point or the
|
|
|
|
// cleanup gods will hate us.
|
|
|
|
CGF.EnsureInsertPoint();
|
|
|
|
}
|
|
|
|
};
|
2015-10-07 07:40:43 +08:00
|
|
|
} // end anonymous namespace
|
2010-07-21 08:52:03 +08:00
|
|
|
|
2010-07-06 09:34:17 +08:00
|
|
|
/// Enters a finally block for an implementation using zero-cost
|
|
|
|
/// exceptions. This is mostly general, but hard-codes some
|
|
|
|
/// language/ABI-specific behavior in the catch-all sections.
|
2011-06-22 10:32:12 +08:00
|
|
|
void CodeGenFunction::FinallyInfo::enter(CodeGenFunction &CGF,
|
|
|
|
const Stmt *body,
|
|
|
|
llvm::Constant *beginCatchFn,
|
|
|
|
llvm::Constant *endCatchFn,
|
|
|
|
llvm::Constant *rethrowFn) {
|
2014-05-21 13:09:00 +08:00
|
|
|
assert((beginCatchFn != nullptr) == (endCatchFn != nullptr) &&
|
2010-07-06 09:34:17 +08:00
|
|
|
"begin/end catch functions not paired");
|
2011-06-22 10:32:12 +08:00
|
|
|
assert(rethrowFn && "rethrow function is required");
|
|
|
|
|
|
|
|
BeginCatchFn = beginCatchFn;
|
2010-07-06 09:34:17 +08:00
|
|
|
|
|
|
|
// The rethrow function has one of the following two types:
|
|
|
|
// void (*)()
|
|
|
|
// void (*)(void*)
|
|
|
|
// In the latter case we need to pass it the exception object.
|
|
|
|
// But we can't use the exception slot because the @finally might
|
|
|
|
// have a landing pad (which would overwrite the exception slot).
|
2011-07-18 12:24:23 +08:00
|
|
|
llvm::FunctionType *rethrowFnTy =
|
2010-07-06 09:34:17 +08:00
|
|
|
cast<llvm::FunctionType>(
|
2011-06-22 10:32:12 +08:00
|
|
|
cast<llvm::PointerType>(rethrowFn->getType())->getElementType());
|
2014-05-21 13:09:00 +08:00
|
|
|
SavedExnVar = nullptr;
|
2011-06-22 10:32:12 +08:00
|
|
|
if (rethrowFnTy->getNumParams())
|
|
|
|
SavedExnVar = CGF.CreateTempAlloca(CGF.Int8PtrTy, "finally.exn");
|
2010-07-06 09:34:17 +08:00
|
|
|
|
|
|
|
// A finally block is a statement which must be executed on any edge
|
|
|
|
// out of a given scope. Unlike a cleanup, the finally block may
|
|
|
|
// contain arbitrary control flow leading out of itself. In
|
|
|
|
// addition, finally blocks should always be executed, even if there
|
|
|
|
// are no catch handlers higher on the stack. Therefore, we
|
|
|
|
// surround the protected scope with a combination of a normal
|
|
|
|
// cleanup (to catch attempts to break out of the block via normal
|
|
|
|
// control flow) and an EH catch-all (semantically "outside" any try
|
|
|
|
// statement to which the finally block might have been attached).
|
|
|
|
// The finally block itself is generated in the context of a cleanup
|
|
|
|
// which conditionally leaves the catch-all.
|
|
|
|
|
|
|
|
// Jump destination for performing the finally block on an exception
|
|
|
|
// edge. We'll never actually reach this block, so unreachable is
|
|
|
|
// fine.
|
2011-06-22 10:32:12 +08:00
|
|
|
RethrowDest = CGF.getJumpDestInCurrentScope(CGF.getUnreachableBlock());
|
2010-07-06 09:34:17 +08:00
|
|
|
|
|
|
|
// Whether the finally block is being executed for EH purposes.
|
2011-06-22 10:32:12 +08:00
|
|
|
ForEHVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "finally.for-eh");
|
|
|
|
CGF.Builder.CreateFlagStore(false, ForEHVar);
|
2010-07-06 09:34:17 +08:00
|
|
|
|
|
|
|
// Enter a normal cleanup which will perform the @finally block.
|
2011-06-22 10:32:12 +08:00
|
|
|
CGF.EHStack.pushCleanup<PerformFinally>(NormalCleanup, body,
|
|
|
|
ForEHVar, endCatchFn,
|
|
|
|
rethrowFn, SavedExnVar);
|
2010-07-06 09:34:17 +08:00
|
|
|
|
|
|
|
// Enter a catch-all scope.
|
2011-06-22 10:32:12 +08:00
|
|
|
llvm::BasicBlock *catchBB = CGF.createBasicBlock("finally.catchall");
|
|
|
|
EHCatchScope *catchScope = CGF.EHStack.pushCatch(1);
|
|
|
|
catchScope->setCatchAllHandler(0, catchBB);
|
|
|
|
}

void CodeGenFunction::FinallyInfo::exit(CodeGenFunction &CGF) {
  // Leave the finally catch-all.
  EHCatchScope &catchScope = cast<EHCatchScope>(*CGF.EHStack.begin());
  llvm::BasicBlock *catchBB = catchScope.getHandler(0).Block;

  CGF.popCatchScope();

  // If there are any references to the catch-all block, emit it.
  if (catchBB->use_empty()) {
    delete catchBB;
  } else {
    CGBuilderTy::InsertPoint savedIP = CGF.Builder.saveAndClearIP();
    CGF.EmitBlock(catchBB);

    llvm::Value *exn = nullptr;

    // If there's a begin-catch function, call it.
    if (BeginCatchFn) {
      exn = CGF.getExceptionFromSlot();
      CGF.EmitNounwindRuntimeCall(BeginCatchFn, exn);
    }

    // If we need to remember the exception pointer to rethrow later, do so.
    if (SavedExnVar) {
      if (!exn) exn = CGF.getExceptionFromSlot();
      CGF.Builder.CreateAlignedStore(exn, SavedExnVar, CGF.getPointerAlign());
    }

    // Tell the cleanups in the finally block that we're doing this for EH.
    CGF.Builder.CreateFlagStore(true, ForEHVar);

    // Thread a jump through the finally cleanup.
    CGF.EmitBranchThroughCleanup(RethrowDest);

    CGF.Builder.restoreIP(savedIP);
  }

  // Finally, leave the @finally cleanup.
  CGF.PopCleanupBlock();
}

llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
  if (TerminateLandingPad)
    return TerminateLandingPad;

  CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();

  // This will get inserted at the end of the function.
  TerminateLandingPad = createBasicBlock("terminate.lpad");
  Builder.SetInsertPoint(TerminateLandingPad);

  // Tell the backend that this is a landing pad.
  const EHPersonality &Personality = EHPersonality::get(*this);

  if (!CurFn->hasPersonalityFn())
    CurFn->setPersonalityFn(getOpaquePersonalityFn(CGM, Personality));

  llvm::LandingPadInst *LPadInst =
      Builder.CreateLandingPad(llvm::StructType::get(Int8PtrTy, Int32Ty), 0);
  LPadInst->addClause(getCatchAllValue(*this));

  llvm::Value *Exn = nullptr;
  if (getLangOpts().CPlusPlus)
    Exn = Builder.CreateExtractValue(LPadInst, 0);
  llvm::CallInst *terminateCall =
      CGM.getCXXABI().emitTerminateForUnexpectedException(*this, Exn);
  terminateCall->setDoesNotReturn();
  Builder.CreateUnreachable();
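
  // The emitted block is roughly (a sketch; the noreturn terminate helper
  // is ABI-dependent, e.g. __clang_call_terminate for the Itanium C++ ABI):
  //
  //   terminate.lpad:
  //     %lpad = landingpad { i8*, i32 } catch i8* null
  //     %exn = extractvalue { i8*, i32 } %lpad, 0
  //     call void @__clang_call_terminate(i8* %exn)   ; noreturn
  //     unreachable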

  // Restore the saved insertion state.
  Builder.restoreIP(SavedIP);

  return TerminateLandingPad;
}

llvm::BasicBlock *CodeGenFunction::getTerminateHandler() {
  if (TerminateHandler)
    return TerminateHandler;

  CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();

  // Set up the terminate handler. This block is inserted at the very
  // end of the function by FinishFunction.
  TerminateHandler = createBasicBlock("terminate.handler");
  Builder.SetInsertPoint(TerminateHandler);

  llvm::Value *Exn = nullptr;
  SaveAndRestore<llvm::Instruction *> RestoreCurrentFuncletPad(
      CurrentFuncletPad);
  if (EHPersonality::get(*this).usesFuncletPads()) {
    llvm::Value *ParentPad = CurrentFuncletPad;
    if (!ParentPad)
      ParentPad = llvm::ConstantTokenNone::get(CGM.getLLVMContext());
    CurrentFuncletPad = Builder.CreateCleanupPad(ParentPad);
  } else {
    if (getLangOpts().CPlusPlus)
      Exn = getExceptionFromSlot();
  }
  llvm::CallInst *terminateCall =
      CGM.getCXXABI().emitTerminateForUnexpectedException(*this, Exn);
  terminateCall->setDoesNotReturn();
  Builder.CreateUnreachable();

  // Restore the saved insertion state.
  Builder.restoreIP(SavedIP);

  return TerminateHandler;
}

llvm::BasicBlock *CodeGenFunction::getEHResumeBlock(bool isCleanup) {
  if (EHResumeBlock) return EHResumeBlock;

  CGBuilderTy::InsertPoint SavedIP = Builder.saveIP();

  // We emit a jump to a notional label at the outermost unwind state.
  EHResumeBlock = createBasicBlock("eh.resume");
  Builder.SetInsertPoint(EHResumeBlock);

  const EHPersonality &Personality = EHPersonality::get(*this);

  // This can always be a call because we necessarily didn't find
  // anything on the EH stack which needs our help.
  const char *RethrowName = Personality.CatchallRethrowFn;
  if (RethrowName != nullptr && !isCleanup) {
    EmitRuntimeCall(getCatchallRethrowFn(CGM, RethrowName),
                    getExceptionFromSlot())->setDoesNotReturn();
    Builder.CreateUnreachable();
    Builder.restoreIP(SavedIP);
    return EHResumeBlock;
  }

  // Recreate the landingpad's return value for the 'resume' instruction.
  llvm::Value *Exn = getExceptionFromSlot();
  llvm::Value *Sel = getSelectorFromSlot();

  llvm::Type *LPadType = llvm::StructType::get(Exn->getType(), Sel->getType());
  llvm::Value *LPadVal = llvm::UndefValue::get(LPadType);
  LPadVal = Builder.CreateInsertValue(LPadVal, Exn, 0, "lpad.val");
  LPadVal = Builder.CreateInsertValue(LPadVal, Sel, 1, "lpad.val");
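
  // This rebuilds the { exn, selector } aggregate a landingpad would have
  // produced, roughly (a sketch):
  //
  //   %lpad.val  = insertvalue { i8*, i32 } undef, i8* %exn, 0
  //   %lpad.val2 = insertvalue { i8*, i32 } %lpad.val, i32 %sel, 1
  //   resume { i8*, i32 } %lpad.val2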

  Builder.CreateResume(LPadVal);
  Builder.restoreIP(SavedIP);
  return EHResumeBlock;
}

void CodeGenFunction::EmitSEHTryStmt(const SEHTryStmt &S) {
  EnterSEHTryStmt(S);
  {
    JumpDest TryExit = getJumpDestInCurrentScope("__try.__leave");

    SEHTryEpilogueStack.push_back(&TryExit);
    EmitStmt(S.getTryBlock());
    SEHTryEpilogueStack.pop_back();

    if (!TryExit.getBlock()->use_empty())
      EmitBlock(TryExit.getBlock(), /*IsFinished=*/true);
    else
      delete TryExit.getBlock();
  }
  ExitSEHTryStmt(S);
}
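
// Note: a __leave statement inside the __try body branches to the
// "__try.__leave" destination created above; EmitSEHLeaveStmt reaches it
// via SEHTryEpilogueStack (a summary of behavior defined elsewhere in
// this file).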

namespace {
struct PerformSEHFinally final : EHScopeStack::Cleanup {
  llvm::Function *OutlinedFinally;
  PerformSEHFinally(llvm::Function *OutlinedFinally)
      : OutlinedFinally(OutlinedFinally) {}

  void Emit(CodeGenFunction &CGF, Flags F) override {
    ASTContext &Context = CGF.getContext();
    CodeGenModule &CGM = CGF.CGM;

    CallArgList Args;

    // Compute the two argument values.
    QualType ArgTys[2] = {Context.UnsignedCharTy, Context.VoidPtrTy};
    llvm::Value *LocalAddrFn = CGM.getIntrinsic(llvm::Intrinsic::localaddress);
    llvm::Value *FP = CGF.Builder.CreateCall(LocalAddrFn);
    llvm::Value *IsForEH =
        llvm::ConstantInt::get(CGF.ConvertType(ArgTys[0]), F.isForEHCleanup());
    Args.add(RValue::get(IsForEH), ArgTys[0]);
    Args.add(RValue::get(FP), ArgTys[1]);
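
    // The call emitted below thus matches the outlined helper's prototype
    // (see startOutlinedSEHHelper); schematically, with an illustrative
    // mangled name:
    //
    //   call void @"?fin$0@0@parent@@"(i8 zeroext %abnormal_termination,
    //                                  i8* %frame_pointer)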

    // Arrange a two-arg function info and type.
    const CGFunctionInfo &FnInfo =
        CGM.getTypes().arrangeBuiltinFunctionCall(Context.VoidTy, Args);

    auto Callee = CGCallee::forDirect(OutlinedFinally);
    CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
  }
};
} // end anonymous namespace

namespace {
/// Find all local variable captures in the statement.
struct CaptureFinder : ConstStmtVisitor<CaptureFinder> {
  CodeGenFunction &ParentCGF;
  const VarDecl *ParentThis;
  llvm::SmallSetVector<const VarDecl *, 4> Captures;
  Address SEHCodeSlot = Address::invalid();
  CaptureFinder(CodeGenFunction &ParentCGF, const VarDecl *ParentThis)
      : ParentCGF(ParentCGF), ParentThis(ParentThis) {}

  // Return true if we need to do any capturing work.
  bool foundCaptures() {
    return !Captures.empty() || SEHCodeSlot.isValid();
  }

  void Visit(const Stmt *S) {
    // See if this is a capture, then recurse.
    ConstStmtVisitor<CaptureFinder>::Visit(S);
    for (const Stmt *Child : S->children())
      if (Child)
        Visit(Child);
  }

  void VisitDeclRefExpr(const DeclRefExpr *E) {
    // If this is already a capture, just make sure we capture 'this'.
    if (E->refersToEnclosingVariableOrCapture()) {
      Captures.insert(ParentThis);
      return;
    }

    const auto *D = dyn_cast<VarDecl>(E->getDecl());
    if (D && D->isLocalVarDeclOrParm() && D->hasLocalStorage())
      Captures.insert(D);
  }

  void VisitCXXThisExpr(const CXXThisExpr *E) {
    Captures.insert(ParentThis);
  }

  void VisitCallExpr(const CallExpr *E) {
    // We only need to add parent frame allocations for these builtins in x86.
    if (ParentCGF.getTarget().getTriple().getArch() != llvm::Triple::x86)
      return;

    unsigned ID = E->getBuiltinCallee();
    switch (ID) {
    case Builtin::BI__exception_code:
    case Builtin::BI_exception_code:
      // This is the simple case where we are the outermost finally. All we
      // have to do here is make sure we escape this and recover it in the
      // outlined handler.
      if (!SEHCodeSlot.isValid())
        SEHCodeSlot = ParentCGF.SEHCodeSlotStack.back();
      break;
    }
  }
};
} // end anonymous namespace

Address CodeGenFunction::recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
                                                   Address ParentVar,
                                                   llvm::Value *ParentFP) {
  llvm::CallInst *RecoverCall = nullptr;
  CGBuilderTy Builder(*this, AllocaInsertPt);
  if (auto *ParentAlloca = dyn_cast<llvm::AllocaInst>(ParentVar.getPointer())) {
    // Mark the variable escaped if nobody else referenced it and compute the
    // localescape index.
    auto InsertPair = ParentCGF.EscapedLocals.insert(
        std::make_pair(ParentAlloca, ParentCGF.EscapedLocals.size()));
    int FrameEscapeIdx = InsertPair.first->second;
    // call i8* @llvm.localrecover(i8* bitcast(@parentFn), i8* %fp, i32 N)
    llvm::Function *FrameRecoverFn = llvm::Intrinsic::getDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localrecover);
    llvm::Constant *ParentI8Fn =
        llvm::ConstantExpr::getBitCast(ParentCGF.CurFn, Int8PtrTy);
    RecoverCall = Builder.CreateCall(
        FrameRecoverFn, {ParentI8Fn, ParentFP,
                         llvm::ConstantInt::get(Int32Ty, FrameEscapeIdx)});

  } else {
    // If the parent didn't have an alloca, we're doing some nested outlining.
    // Just clone the existing localrecover call, but tweak the FP argument to
    // use our FP value. All other arguments are constants.
    auto *ParentRecover =
        cast<llvm::IntrinsicInst>(ParentVar.getPointer()->stripPointerCasts());
    assert(ParentRecover->getIntrinsicID() == llvm::Intrinsic::localrecover &&
           "expected alloca or localrecover in parent LocalDeclMap");
    RecoverCall = cast<llvm::CallInst>(ParentRecover->clone());
    RecoverCall->setArgOperand(1, ParentFP);
    RecoverCall->insertBefore(AllocaInsertPt);
  }

  // Bitcast the variable, rename it, and insert it in the local decl map.
  llvm::Value *ChildVar =
      Builder.CreateBitCast(RecoverCall, ParentVar.getType());
  ChildVar->setName(ParentVar.getName());
  return Address(ChildVar, ParentVar.getAlignment());
}

void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
                                         const Stmt *OutlinedStmt,
                                         bool IsFilter) {
  // Find all captures in the Stmt.
  CaptureFinder Finder(ParentCGF, ParentCGF.CXXABIThisDecl);
  Finder.Visit(OutlinedStmt);

  // We can exit early on x86_64 when there are no captures. We just have to
  // save the exception code in filters so that __exception_code() works.
  if (!Finder.foundCaptures() &&
      CGM.getTarget().getTriple().getArch() != llvm::Triple::x86) {
    if (IsFilter)
      EmitSEHExceptionCodeSave(ParentCGF, nullptr, nullptr);
    return;
  }

  llvm::Value *EntryFP = nullptr;
  CGBuilderTy Builder(CGM, AllocaInsertPt);
  if (IsFilter && CGM.getTarget().getTriple().getArch() == llvm::Triple::x86) {
    // 32-bit SEH filters need to be careful about FP recovery. The end of the
    // EH registration is passed in as the EBP physical register. We can
    // recover that with llvm.frameaddress(1).
    EntryFP = Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::frameaddress), {Builder.getInt32(1)});
  } else {
    // Otherwise, for x64 and 32-bit finally functions, the parent FP is the
    // second parameter.
    auto AI = CurFn->arg_begin();
    ++AI;
    EntryFP = &*AI;
  }

  llvm::Value *ParentFP = EntryFP;
  if (IsFilter) {
    // Given whatever FP the runtime provided us in EntryFP, recover the true
    // frame pointer of the parent function. We only need to do this in
    // filters, since finally funclets recover the parent FP for us.
    llvm::Function *RecoverFPIntrin =
        CGM.getIntrinsic(llvm::Intrinsic::x86_seh_recoverfp);
    llvm::Constant *ParentI8Fn =
        llvm::ConstantExpr::getBitCast(ParentCGF.CurFn, Int8PtrTy);
    ParentFP = Builder.CreateCall(RecoverFPIntrin, {ParentI8Fn, EntryFP});
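    // Schematically, mirroring the localrecover comment above (a sketch):
    //   %parentfp = call i8* @llvm.x86.seh.recoverfp(i8* bitcast(@parentFn),
    //                                                i8* %entryfp)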
  }

  // Create llvm.localrecover calls for all captures.
  for (const VarDecl *VD : Finder.Captures) {
    if (isa<ImplicitParamDecl>(VD)) {
      CGM.ErrorUnsupported(VD, "'this' captured by SEH");
      CXXThisValue = llvm::UndefValue::get(ConvertTypeForMem(VD->getType()));
      continue;
    }
    if (VD->getType()->isVariablyModifiedType()) {
      CGM.ErrorUnsupported(VD, "VLA captured by SEH");
      continue;
    }
    assert((isa<ImplicitParamDecl>(VD) || VD->isLocalVarDeclOrParm()) &&
           "captured non-local variable");

    // If this decl hasn't been declared yet, it will be declared in the
    // OutlinedStmt.
    auto I = ParentCGF.LocalDeclMap.find(VD);
    if (I == ParentCGF.LocalDeclMap.end())
      continue;

    Address ParentVar = I->second;
    setAddrOfLocalVar(
        VD, recoverAddrOfEscapedLocal(ParentCGF, ParentVar, ParentFP));
  }

  if (Finder.SEHCodeSlot.isValid()) {
    SEHCodeSlotStack.push_back(
        recoverAddrOfEscapedLocal(ParentCGF, Finder.SEHCodeSlot, ParentFP));
  }

  if (IsFilter)
    EmitSEHExceptionCodeSave(ParentCGF, ParentFP, EntryFP);
}

/// Arrange a function prototype that can be called by Windows exception
/// handling personalities. On Win64, the prototype looks like:
///   RetTy func(void *EHPtrs, void *ParentFP);
void CodeGenFunction::startOutlinedSEHHelper(CodeGenFunction &ParentCGF,
                                             bool IsFilter,
                                             const Stmt *OutlinedStmt) {
  SourceLocation StartLoc = OutlinedStmt->getLocStart();

  // Get the mangled function name.
  SmallString<128> Name;
  {
    llvm::raw_svector_ostream OS(Name);
    const FunctionDecl *ParentSEHFn = ParentCGF.CurSEHParent;
    assert(ParentSEHFn && "No CurSEHParent!");
    MangleContext &Mangler = CGM.getCXXABI().getMangleContext();
    if (IsFilter)
      Mangler.mangleSEHFilterExpression(ParentSEHFn, OS);
    else
      Mangler.mangleSEHFinallyBlock(ParentSEHFn, OS);
  }

  FunctionArgList Args;
  if (CGM.getTarget().getTriple().getArch() != llvm::Triple::x86 || !IsFilter) {
    // All SEH finally functions take two parameters. Win64 filters take two
    // parameters. Win32 filters take no parameters.
    if (IsFilter) {
      Args.push_back(ImplicitParamDecl::Create(
          getContext(), /*DC=*/nullptr, StartLoc,
          &getContext().Idents.get("exception_pointers"),
          getContext().VoidPtrTy, ImplicitParamDecl::Other));
    } else {
      Args.push_back(ImplicitParamDecl::Create(
          getContext(), /*DC=*/nullptr, StartLoc,
          &getContext().Idents.get("abnormal_termination"),
          getContext().UnsignedCharTy, ImplicitParamDecl::Other));
    }
    Args.push_back(ImplicitParamDecl::Create(
        getContext(), /*DC=*/nullptr, StartLoc,
        &getContext().Idents.get("frame_pointer"), getContext().VoidPtrTy,
        ImplicitParamDecl::Other));
  }

  QualType RetTy = IsFilter ? getContext().LongTy : getContext().VoidTy;
|
|
|
|
|
2016-03-11 12:30:31 +08:00
|
|
|
const CGFunctionInfo &FnInfo =
|
|
|
|
CGM.getTypes().arrangeBuiltinFunctionDeclaration(RetTy, Args);
|
2015-04-15 04:59:00 +08:00
|
|
|
|
2015-04-14 04:04:22 +08:00
|
|
|
llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
|
2015-04-15 04:59:00 +08:00
|
|
|
llvm::Function *Fn = llvm::Function::Create(
|
|
|
|
FnTy, llvm::GlobalValue::InternalLinkage, Name.str(), &CGM.getModule());
|
2015-04-14 04:03:03 +08:00
|
|
|
|
2015-04-15 04:59:00 +08:00
|
|
|
IsOutlinedSEHHelper = true;
|
|
|
|
|
2015-04-14 04:04:22 +08:00
|
|
|
StartFunction(GlobalDecl(), RetTy, Fn, FnInfo, Args,
|
2015-04-15 04:59:00 +08:00
|
|
|
OutlinedStmt->getLocStart(), OutlinedStmt->getLocStart());
|
2016-03-02 03:42:53 +08:00
|
|
|
CurSEHParent = ParentCGF.CurSEHParent;
|
2015-04-14 04:03:03 +08:00
|
|
|
|
2015-04-15 04:59:00 +08:00
|
|
|
CGM.SetLLVMFunctionAttributes(nullptr, FnInfo, CurFn);
|
2015-07-07 08:36:30 +08:00
|
|
|
EmitCapturedLocals(ParentCGF, OutlinedStmt, IsFilter);
|
2015-04-15 04:59:00 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
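
// As an illustration only (the exact spellings come from the Microsoft
// mangler used above, so treat these as assumed examples): for a parent
// function 'f', the Win64 helpers have shapes roughly like
//   long "?filt$0@0@f@@"(void *exception_pointers, void *frame_pointer);
//   void "?fin$0@0@f@@"(unsigned char abnormal_termination,
//                       void *frame_pointer);
// while a Win32 filter is given no parameters at all, as arranged above.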

/// Create a stub filter function that will ultimately hold the code of the
/// filter expression. The EH preparation passes in LLVM will outline the code
/// from the main function body into this stub.
llvm::Function *
CodeGenFunction::GenerateSEHFilterFunction(CodeGenFunction &ParentCGF,
                                           const SEHExceptStmt &Except) {
  const Expr *FilterExpr = Except.getFilterExpr();
  startOutlinedSEHHelper(ParentCGF, true, FilterExpr);

  // Emit the original filter expression, convert to i32, and return.
  llvm::Value *R = EmitScalarExpr(FilterExpr);
  R = Builder.CreateIntCast(R, ConvertType(getContext().LongTy),
                            FilterExpr->getType()->isSignedIntegerType());
  Builder.CreateStore(R, ReturnValue);

  FinishFunction(FilterExpr->getLocEnd());

  return CurFn;
}
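
// (The value the stub above returns follows the usual SEH filter convention:
// EXCEPTION_CONTINUE_EXECUTION is -1, EXCEPTION_CONTINUE_SEARCH is 0, and
// EXCEPTION_EXECUTE_HANDLER is 1.)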

llvm::Function *
CodeGenFunction::GenerateSEHFinallyFunction(CodeGenFunction &ParentCGF,
                                            const SEHFinallyStmt &Finally) {
  const Stmt *FinallyBlock = Finally.getBlock();
  startOutlinedSEHHelper(ParentCGF, false, FinallyBlock);

  // Emit the original finally block body.
  EmitStmt(FinallyBlock);

  FinishFunction(FinallyBlock->getLocEnd());

  return CurFn;
}
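
// (As a concrete example of the helper above, hypothetical user code
// '__try { g(); } __finally { cleanup(); }' yields an outlined helper that
// runs 'cleanup()'; the NormalAndEHCleanup pushed in EnterSEHTryStmt below
// makes it run on both the normal path and the unwind path.)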

void CodeGenFunction::EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
                                               llvm::Value *ParentFP,
                                               llvm::Value *EntryFP) {
  // Get the pointer to the EXCEPTION_POINTERS struct. This is returned by the
  // __exception_info intrinsic.
  if (CGM.getTarget().getTriple().getArch() != llvm::Triple::x86) {
    // On Win64, the info is passed as the first parameter to the filter.
    SEHInfo = &*CurFn->arg_begin();
    SEHCodeSlotStack.push_back(
        CreateMemTemp(getContext().IntTy, "__exception_code"));
  } else {
    // On Win32, the EBP on entry to the filter points to the end of an
    // exception registration object. It contains 6 32-bit fields, and the info
    // pointer is stored in the second field. So, GEP 20 bytes backwards and
    // load the pointer.
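    // (Sketch of the arithmetic: 6 fields * 4 bytes = 24 bytes for the whole
    // object, and the info pointer is the second field, at offset +4 from its
    // start, so it lives at EntryFP - 24 + 4 = EntryFP - 20.)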
    SEHInfo = Builder.CreateConstInBoundsGEP1_32(Int8Ty, EntryFP, -20);
    SEHInfo = Builder.CreateBitCast(SEHInfo, Int8PtrTy->getPointerTo());
    SEHInfo = Builder.CreateAlignedLoad(Int8PtrTy, SEHInfo, getPointerAlign());
    SEHCodeSlotStack.push_back(recoverAddrOfEscapedLocal(
        ParentCGF, ParentCGF.SEHCodeSlotStack.back(), ParentFP));
  }

  // Save the exception code in the exception slot to unify exception access in
  // the filter function and the landing pad.
  // struct EXCEPTION_POINTERS {
  //   EXCEPTION_RECORD *ExceptionRecord;
  //   CONTEXT *ContextRecord;
  // };
  // int exceptioncode = exception_pointers->ExceptionRecord->ExceptionCode;
  llvm::Type *RecordTy = CGM.Int32Ty->getPointerTo();
  llvm::Type *PtrsTy = llvm::StructType::get(RecordTy, CGM.VoidPtrTy);
  llvm::Value *Ptrs = Builder.CreateBitCast(SEHInfo, PtrsTy->getPointerTo());
  llvm::Value *Rec = Builder.CreateStructGEP(PtrsTy, Ptrs, 0);
  Rec = Builder.CreateAlignedLoad(Rec, getPointerAlign());
  llvm::Value *Code = Builder.CreateAlignedLoad(Rec, getIntAlign());
  assert(!SEHCodeSlotStack.empty() && "emitting EH code outside of __except");
  Builder.CreateStore(Code, SEHCodeSlotStack.back());
}
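
// This lowers 'GetExceptionInformation()' / '_exception_info()', which is
// only meaningful inside a filter expression, e.g. (hypothetical user code)
// '__except (MyFilter(GetExceptionInformation()))'.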
llvm::Value *CodeGenFunction::EmitSEHExceptionInfo() {
  // Sema should diagnose calling this builtin outside of a filter context, but
  // don't crash if we screw up.
  if (!SEHInfo)
    return llvm::UndefValue::get(Int8PtrTy);
  assert(SEHInfo->getType() == Int8PtrTy);
  return SEHInfo;
}
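
// This lowers 'GetExceptionCode()' / '_exception_code()', e.g.
// '__except (GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION)'.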
llvm::Value *CodeGenFunction::EmitSEHExceptionCode() {
  assert(!SEHCodeSlotStack.empty() && "emitting EH code outside of __except");
  return Builder.CreateLoad(SEHCodeSlotStack.back());
}
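
// This lowers 'AbnormalTermination()' / '_abnormal_termination()', e.g.
// (hypothetical user code) '__finally { if (AbnormalTermination()) rollback(); }'.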
llvm::Value *CodeGenFunction::EmitSEHAbnormalTermination() {
  // Abnormal termination is just the first parameter to the outlined finally
  // helper.
  auto AI = CurFn->arg_begin();
  return Builder.CreateZExt(&*AI, Int32Ty);
}

void CodeGenFunction::EnterSEHTryStmt(const SEHTryStmt &S) {
  CodeGenFunction HelperCGF(CGM, /*suppressNewContext=*/true);
  if (const SEHFinallyStmt *Finally = S.getFinallyHandler()) {
    // Outline the finally block.
    llvm::Function *FinallyFunc =
        HelperCGF.GenerateSEHFinallyFunction(*this, *Finally);

    // Push a cleanup for __finally blocks.
    EHStack.pushCleanup<PerformSEHFinally>(NormalAndEHCleanup, FinallyFunc);
    return;
  }

  // Otherwise, we must have an __except block.
  const SEHExceptStmt *Except = S.getExceptHandler();
  assert(Except);
  EHCatchScope *CatchScope = EHStack.pushCatch(1);
  SEHCodeSlotStack.push_back(
      CreateMemTemp(getContext().IntTy, "__exception_code"));

  // If the filter is known to evaluate to 1, then we can use the clause
  // "catch i8* null". We can't do this on x86 because the filter has to save
  // the exception code.
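  // (For instance, '__except (EXCEPTION_EXECUTE_HANDLER)' has the constant
  // filter value 1, so on Win64 it takes this catch-all path and no filter
  // function is outlined.)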
  llvm::Constant *C =
      CGM.EmitConstantExpr(Except->getFilterExpr(), getContext().IntTy, this);
  if (CGM.getTarget().getTriple().getArch() != llvm::Triple::x86 && C &&
      C->isOneValue()) {
    CatchScope->setCatchAllHandler(0, createBasicBlock("__except"));
    return;
  }

  // In general, we have to emit an outlined filter function. Use the function
  // in place of the RTTI typeinfo global that C++ EH uses.
  llvm::Function *FilterFunc =
      HelperCGF.GenerateSEHFilterFunction(*this, *Except);
  llvm::Constant *OpaqueFunc =
      llvm::ConstantExpr::getBitCast(FilterFunc, Int8PtrTy);
  CatchScope->setHandler(0, OpaqueFunc, createBasicBlock("__except.ret"));
}

void CodeGenFunction::ExitSEHTryStmt(const SEHTryStmt &S) {
  // Just pop the cleanup if it's a __finally block.
  if (S.getFinallyHandler()) {
    PopCleanupBlock();
    return;
  }

  // Otherwise, we must have an __except block.
  const SEHExceptStmt *Except = S.getExceptHandler();
  assert(Except && "__try must have __finally xor __except");
  EHCatchScope &CatchScope = cast<EHCatchScope>(*EHStack.begin());

  // Don't emit the __except block if the __try block lacked invokes.
  // TODO: Model unwind edges from instructions, either with iload / istore or
  // a try body function.
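  // (For example, a __try body containing no calls, such as
  // '__try { i = 0; }', produces no invokes; the handler is dropped entirely
  // because faults in plain instructions are not yet modeled as unwind edges.)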
|
|
|
|
if (!CatchScope.hasEHBranches()) {
|
|
|
|
CatchScope.clearHandlerBlocks();
|
|
|
|
EHStack.popCatch();
|
2015-07-07 08:36:30 +08:00
|
|
|
SEHCodeSlotStack.pop_back();
|
Initial support for Win64 SEH IR emission
The lowering looks a lot like normal EH lowering, with the exception
that the exceptions are caught by executing filter expression code
instead of matching typeinfo globals. The filter expressions are
outlined into functions which are used in landingpad clauses where
typeinfo would normally go.
Major aspects that still need work:
- Non-call exceptions in __try bodies won't work yet. The plan is to
outline the __try block in the frontend to keep things simple.
- Filter expressions cannot use local variables until capturing is
implemented.
- __finally blocks will not run after exceptions. Fixing this requires
work in the LLVM SEH preparation pass.
The IR lowering looks like this:
// C code:
bool safe_div(int n, int d, int *r) {
__try {
*r = normal_div(n, d);
} __except(_exception_code() == EXCEPTION_INT_DIVIDE_BY_ZERO) {
return false;
}
return true;
}
; LLVM IR:
define i32 @filter(i8* %e, i8* %fp) {
%ehptrs = bitcast i8* %e to i32**
%ehrec = load i32** %ehptrs
%code = load i32* %ehrec
%matches = icmp eq i32 %code, i32 u0xC0000094
%matches.i32 = zext i1 %matches to i32
ret i32 %matches.i32
}
define i1 zeroext @safe_div(i32 %n, i32 %d, i32* %r) {
%rr = invoke i32 @normal_div(i32 %n, i32 %d)
to label %normal unwind to label %lpad
normal:
store i32 %rr, i32* %r
ret i1 1
lpad:
%ehvals = landingpad {i8*, i32} personality i32 (...)* @__C_specific_handler
catch i8* bitcast (i32 (i8*, i8*)* @filter to i8*)
%ehptr = extractvalue {i8*, i32} %ehvals, i32 0
%sel = extractvalue {i8*, i32} %ehvals, i32 1
%filter_sel = call i32 @llvm.eh.seh.typeid.for(i8* bitcast (i32 (i8*, i8*)* @filter to i8*))
%matches = icmp eq i32 %sel, %filter_sel
br i1 %matches, label %eh.except, label %eh.resume
eh.except:
ret i1 false
eh.resume:
resume
}
Reviewers: rjmccall, rsmith, majnemer
Differential Revision: http://reviews.llvm.org/D5607
llvm-svn: 226760
2015-01-22 09:36:17 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// The fall-through block.
|
|
|
|
llvm::BasicBlock *ContBB = createBasicBlock("__try.cont");
|
|
|
|
|
|
|
|
// We just emitted the body of the __try; jump to the continue block.
|
|
|
|
if (HaveInsertPoint())
|
|
|
|
Builder.CreateBr(ContBB);
|
|
|
|
|
|
|
|
// Check if our filter function returned true.
|
|
|
|
emitCatchDispatchBlock(*this, CatchScope);
|
|
|
|
|
|
|
|
// Grab the block before we pop the handler.
|
2015-12-12 13:39:21 +08:00
|
|
|
llvm::BasicBlock *CatchPadBB = CatchScope.getHandler(0).Block;
|
Initial support for Win64 SEH IR emission
The lowering looks a lot like normal EH lowering, with the exception
that the exceptions are caught by executing filter expression code
instead of matching typeinfo globals. The filter expressions are
outlined into functions which are used in landingpad clauses where
typeinfo would normally go.
Major aspects that still need work:
- Non-call exceptions in __try bodies won't work yet. The plan is to
outline the __try block in the frontend to keep things simple.
- Filter expressions cannot use local variables until capturing is
implemented.
- __finally blocks will not run after exceptions. Fixing this requires
work in the LLVM SEH preparation pass.
The IR lowering looks like this:
// C code:
bool safe_div(int n, int d, int *r) {
__try {
*r = normal_div(n, d);
} __except(_exception_code() == EXCEPTION_INT_DIVIDE_BY_ZERO) {
return false;
}
return true;
}
; LLVM IR:
define i32 @filter(i8* %e, i8* %fp) {
%ehptrs = bitcast i8* %e to i32**
%ehrec = load i32** %ehptrs
%code = load i32* %ehrec
%matches = icmp eq i32 %code, i32 u0xC0000094
%matches.i32 = zext i1 %matches to i32
ret i32 %matches.i32
}
define i1 zeroext @safe_div(i32 %n, i32 %d, i32* %r) {
%rr = invoke i32 @normal_div(i32 %n, i32 %d)
to label %normal unwind to label %lpad
normal:
store i32 %rr, i32* %r
ret i1 1
lpad:
%ehvals = landingpad {i8*, i32} personality i32 (...)* @__C_specific_handler
catch i8* bitcast (i32 (i8*, i8*)* @filter to i8*)
%ehptr = extractvalue {i8*, i32} %ehvals, i32 0
%sel = extractvalue {i8*, i32} %ehvals, i32 1
%filter_sel = call i32 @llvm.eh.seh.typeid.for(i8* bitcast (i32 (i8*, i8*)* @filter to i8*))
%matches = icmp eq i32 %sel, %filter_sel
br i1 %matches, label %eh.except, label %eh.resume
eh.except:
ret i1 false
eh.resume:
resume
}
Reviewers: rjmccall, rsmith, majnemer
Differential Revision: http://reviews.llvm.org/D5607
llvm-svn: 226760
2015-01-22 09:36:17 +08:00
|
|
|
  EHStack.popCatch();

  EmitBlockAfterUses(CatchPadBB);
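
  // Note: EmitBlockAfterUses rather than EmitBlock, since the catchpad block
  // already has uses from the dispatch emitted above and is placed near those
  // uses instead of at the current insertion point. (Inferred from the
  // surrounding code, not a documented contract.)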

  // __except blocks don't get outlined into funclets, so immediately do a
  // catchret.
  llvm::CatchPadInst *CPI =
      cast<llvm::CatchPadInst>(CatchPadBB->getFirstNonPHI());
  llvm::BasicBlock *ExceptBB = createBasicBlock("__except");
  Builder.CreateCatchRet(CPI, ExceptBB);
  EmitBlock(ExceptBB);
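
  // The handler funclet emitted above is intentionally trivial; in IR it
  // looks roughly like this (a sketch; operands are illustrative):
  //   catchpad:
  //     %cp = catchpad within %cs [i8* bitcast (i32 (i8*, i8*)* @filter to i8*)]
  //     catchret from %cp to label %__except
  // so the __except body itself runs in the parent function, not inside the
  // funclet.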

  // On Win64, the exception code is returned in EAX. Copy it into the slot.
  if (CGM.getTarget().getTriple().getArch() != llvm::Triple::x86) {
    llvm::Function *SEHCodeIntrin =
        CGM.getIntrinsic(llvm::Intrinsic::eh_exceptioncode);
    llvm::Value *Code = Builder.CreateCall(SEHCodeIntrin, {CPI});
    Builder.CreateStore(Code, SEHCodeSlotStack.back());
  }
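
  // Assuming the documented form of the intrinsic, the call above emits
  // roughly:
  //   %code = call i32 @llvm.eh.exceptioncode(token %cp)
  // letting the backend recover the code from wherever the runtime leaves it
  // on entry to the handler.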

  // Emit the __except body.
  EmitStmt(Except->getBlock());

  // End the lifetime of the exception code.
  SEHCodeSlotStack.pop_back();

  if (HaveInsertPoint())
    Builder.CreateBr(ContBB);

  EmitBlock(ContBB);
}
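
// __leave exits the innermost enclosing __try and jumps past the end of its
// body; any __finally between the __leave and that point still runs. A usage
// sketch (hypothetical code, for illustration only):
//   __try {
//     if (failed)
//       __leave;       // jump to the end of this __try
//     do_more_work();
//   } __finally {
//     cleanup();       // still runs when __leave is taken
//   }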
void CodeGenFunction::EmitSEHLeaveStmt(const SEHLeaveStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  // This must be a __leave from a __finally block, which we warn on and
  // which is UB. Just emit unreachable.
  if (!isSEHTryScope()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
    return;
  }
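
  // Branch through the cleanup machinery so that any __finally blocks between
  // the __leave and the end of the __try run before control reaches the
  // __try epilogue.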
  EmitBranchThroughCleanup(*SEHTryEpilogueStack.back());
}